From 111481cef94589e1dca804d08828b0813bb94cd6 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Tue, 1 Aug 2023 08:09:10 +0000
Subject: [PATCH 001/122] fix state mem leak

---
 source/libs/stream/src/streamBackendRocksdb.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 8534f3b0a1..f6161a4767 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -1625,9 +1625,11 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
   if (pVLen != NULL) *pVLen = len;
 
   if (pKTmp->opNum != pCur->number) {
+    taosMemoryFree(val);
     return -1;
   }
   if (pKey->groupId != 0 && pKey->groupId != pKTmp->key.groupId) {
+    taosMemoryFree(val);
     return -1;
   }
   *pKey = pKTmp->key;

From fa36001c9f767b6a998338aa254dcd4e0b9b414b Mon Sep 17 00:00:00 2001
From: "chao.feng"
Date: Tue, 1 Aug 2023 17:32:06 +0800
Subject: [PATCH 002/122] add CI test case for ts-3479 by charles

---
 .../0-others/user_privilege_multi_users.py | 126 ++++++++++++++++++
 1 file changed, 126 insertions(+)
 create mode 100644 tests/system-test/0-others/user_privilege_multi_users.py

diff --git a/tests/system-test/0-others/user_privilege_multi_users.py b/tests/system-test/0-others/user_privilege_multi_users.py
new file mode 100644
index 0000000000..8812f42e7b
--- /dev/null
+++ b/tests/system-test/0-others/user_privilege_multi_users.py
@@ -0,0 +1,126 @@
+from itertools import product
+import taos
+import random
+from taos.tmq import *
+from util.cases import *
+from util.common import *
+from util.log import *
+from util.sql import *
+from util.sqlset import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug("start to execute %s" % __file__)
+        # init the tdsql
+        tdSql.init(conn.cursor())
+        self.setsql = TDSetSql()
+        # user info
+        self.userNum = 100
+        self.basic_username = "user"
+        self.password = "pwd"
+
+        # db info
+        self.dbname = "user_privilege_multi_users"
+        self.stbname = 'stb'
+        self.ctbname_num = 100
+        self.column_dict = {
+            'ts': 'timestamp',
+            'col1': 'float',
+            'col2': 'int',
+        }
+        self.tag_dict = {
+            'ctbname': 'binary(10)'
+        }
+
+        self.privilege_list = []
+
+    def prepare_data(self):
+        """Create the db and data for test
+        """
+        # create database
+        tdSql.execute(f"create database {self.dbname}")
+        tdLog.debug("sql:" + f"create database {self.dbname}")
+        tdSql.execute(f"use {self.dbname}")
+        tdLog.debug("sql:" + f"use {self.dbname}")
+
+        # create super table
+        tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
+        tdLog.debug("Create stable {} successfully".format(self.stbname))
+        for ctbIndex in range(self.ctbname_num):
+            ctname = f"ctb{ctbIndex}"
+            tdSql.execute(f"create table {ctname} using {self.stbname} tags('{ctname}')")
+            tdLog.debug("sql:" + f"create table {ctname} using {self.stbname} tags('{ctname}')")
+
+    def create_multiusers(self):
+        """Create the user for test
+        """
+        for userIndex in range(self.userNum):
+            username = f"{self.basic_username}{userIndex}"
+            tdSql.execute(f'create user {username} pass "{self.password}"')
+            tdLog.debug("sql:" + f'create user {username} pass "{self.password}"')
+
+    def grant_privilege(self):
+        """Add the privilege for the users
+        """
+        try:
+            for userIndex in range(self.userNum):
+                username = f"{self.basic_username}{userIndex}"
+                privilege = random.choice(["read", "write", "all"])
+                condition = f"ctbname='ctb{userIndex}'"
+                self.privilege_list.append({
+                    "username": username,
+                    "privilege": privilege,
+                    "condition": condition
+                })
+                tdSql.execute(f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}')
+                tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}')
+        except Exception as ex:
+            tdLog.exit(ex)
+
+    def remove_privilege(self):
+        """Remove the privilege for the users
+        """
+        try:
+            for item in self.privilege_list:
+                username = item["username"]
+                privilege = item["privilege"]
+                condition = item["condition"]
+                tdSql.execute(f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}')
+                tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}')
+        except Exception as ex:
+            tdLog.exit(ex)
+
+    def run(self):
+        """
+        Check the information from information_schema.ins_user_privileges
+        """
+        self.create_multiusers()
+        self.prepare_data()
+        # grant privilege to users
+        self.grant_privilege()
+        # check information_schema.ins_user_privileges
+        tdSql.query("select * from information_schema.ins_user_privileges;")
+        tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(tdSql.queryResult))
+        if len(tdSql.queryResult) >= self.userNum:
+            tdLog.debug("case passed")
+        else:
+            tdLog.exit("The privilege number in information_schema.ins_user_privileges is incorrect")
+
+    def stop(self):
+        # remove the privilege
+        self.remove_privilege()
+        # clear env
+        tdSql.execute(f"drop database {self.dbname}")
+        # remove the users
+        for userIndex in range(self.userNum):
+            username = f"{self.basic_username}{userIndex}"
+            tdSql.execute(f'drop user {username}')
+        # close the connection
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())

From 72dc46600ed384f4618fb38058ceabc874d8db40 Mon Sep 17 00:00:00 2001
From: wangjiaming0909 <604227650@qq.com>
Date: Wed, 2 Aug 2023 10:09:50 +0800
Subject: [PATCH 003/122] fix: memory leak when drop db which acquired by user
 with grant/revoke

---
 source/dnode/mnode/impl/src/mndUser.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index 65c1cfbea2..c59d23d252 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -863,6 +863,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
         mndReleaseDb(pMnode, pDb);
         goto _OVER;
       }
+      mndReleaseDb(pMnode, pDb);
     } else {
       while (1) {
         SDbObj *pDb = NULL;
@@ -887,6 +888,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
         mndReleaseDb(pMnode, pDb);
         goto _OVER;
       }
+      mndReleaseDb(pMnode, pDb);
     } else {
       while (1) {
         SDbObj *pDb = NULL;
@@ -908,6 +910,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
         goto _OVER;
       }
       taosHashRemove(newUser.readDbs, alterReq.objname, len);
+      mndReleaseDb(pMnode, pDb);
    } else {
      taosHashClear(newUser.readDbs);
    }
@@ -922,6 +925,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
        goto _OVER;
      }
      taosHashRemove(newUser.writeDbs, alterReq.objname, len);
+     mndReleaseDb(pMnode, pDb);
    } else {
      taosHashClear(newUser.writeDbs);
    }
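Note on the fix above: the mnode pairs every successful mndAcquireDb() (which takes a reference on the SDbObj) with exactly one mndReleaseDb(); before this patch only the error paths released, so a grant/revoke that succeeded leaked one reference and the db object could never be freed on drop. A minimal sketch of the corrected pairing (simplified from mndProcessAlterUserReq; doWorkOnDb() is a hypothetical stand-in for the grant/revoke hash updates, and the surrounding loop is elided):

    /* Hedged sketch of the acquire/release rule this patch restores.
     * mndAcquireDb/mndReleaseDb are the real calls from the diff;
     * doWorkOnDb() is hypothetical. */
    SDbObj *pDb = mndAcquireDb(pMnode, alterReq.objname);
    if (pDb == NULL) goto _OVER;       /* nothing acquired, nothing to release */

    if (doWorkOnDb(pMnode, &newUser, pDb) != 0) {
      mndReleaseDb(pMnode, pDb);       /* error path: release was already present */
      goto _OVER;
    }
    mndReleaseDb(pMnode, pDb);         /* success path: the release this patch adds */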
From 83468a77107b9560849683b8e3989295950882b5 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Wed, 2 Aug 2023 03:18:59 +0000
Subject: [PATCH 004/122] fix state mem leak

---
 source/libs/stream/src/streamBackendRocksdb.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index f6161a4767..5e08f566d6 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -1632,6 +1632,12 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
     taosMemoryFree(val);
     return -1;
   }
+
+  if (pVal != NULL) {
+    *pVal = (char*)val;
+  } else {
+    taosMemoryFree(val);
+  }
   *pKey = pKTmp->key;
   return 0;
 }

From d9fd95777a430b92a0a12322b7faad20527c9607 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Wed, 2 Aug 2023 03:21:39 +0000
Subject: [PATCH 005/122] fix state mem leak

---
 source/libs/stream/src/streamBackendRocksdb.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 5e08f566d6..16fe7341a6 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -1617,12 +1617,6 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
   if (len < 0) {
     return -1;
   }
-  if (pVal != NULL) {
-    *pVal = (char*)val;
-  } else {
-    taosMemoryFree(val);
-  }
-  if (pVLen != NULL) *pVLen = len;
   if (pKTmp->opNum != pCur->number) {
     taosMemoryFree(val);
     return -1;
@@ -1638,6 +1632,8 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
   } else {
     taosMemoryFree(val);
   }
+
+  if (pVLen != NULL) *pVLen = len;
   *pKey = pKTmp->key;
   return 0;
 }

From 6cdf94fec054d3779ee5c8479c4fa250398bd191 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Wed, 2 Aug 2023 03:29:13 +0000
Subject: [PATCH 006/122] fix state mem leak

---
 source/libs/stream/src/streamBackendRocksdb.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 16fe7341a6..d84a09ab20 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -1617,6 +1617,8 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
   if (len < 0) {
     return -1;
   }
+  if (pVal != NULL) *pVal = NULL;
+  if (pVLen != NULL) *pVLen = 0;
 
   if (pKTmp->opNum != pCur->number) {
     taosMemoryFree(val);

From 918eca4d22407b070804451030642348b0ce83cb Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Wed, 2 Aug 2023 03:31:11 +0000
Subject: [PATCH 007/122] fix state mem leak

---
 source/libs/stream/src/streamBackendRocksdb.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index d84a09ab20..dd89c89481 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -1610,6 +1610,9 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
   const char* curKey = rocksdb_iter_key(pCur->iter, (size_t*)&kLen);
   stateSessionKeyDecode((void*)&ktmp, (char*)curKey);
 
+  if (pVal != NULL) *pVal = NULL;
+  if (pVLen != NULL) *pVLen = 0;
+
   SStateSessionKey* pKTmp = &ktmp;
   const char* vval = rocksdb_iter_value(pCur->iter, (size_t*)&vLen);
   char* val = NULL;
@@ -1617,8 +1620,6 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
   if (len < 0) {
     return -1;
   }
-  if (pVal != NULL) *pVal = NULL;
-  if (pVLen != NULL) *pVLen = 0;
 
   if (pKTmp->opNum != pCur->number) {
     taosMemoryFree(val);
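Taken together, patches 004-007 settle the ownership of the decoded RocksDB value: the out-parameters are zeroed up front, every early-return path frees the internal copy, and on success the buffer is handed to the caller through pVal (or freed if the caller passed NULL). A caller-side sketch of the resulting contract (the caller here is hypothetical; taosMemoryFree and the function itself are the real names from the diffs):

    /* After these fixes: on return 0 the callee has transferred `val` into
     * *pVal and the caller must free it; on return -1 (or when pVal is NULL)
     * the callee frees its copy and *pVal stays NULL. */
    SSessionKey key = {0};
    char*       val = NULL;
    int32_t     vlen = 0;
    if (streamStateSessionGetKVByCur_rocksdb(pCur, &key, (void**)&val, &vlen) == 0) {
      /* ... use key, val, vlen ... */
      taosMemoryFree(val);  /* caller releases the transferred buffer */
    }
    /* on failure, nothing to free: the callee already released its copy */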
From 632f5fbe8f17d02c6bc04cd7bb28824c2a00e2ad Mon Sep 17 00:00:00 2001
From: wangjiaming0909 <604227650@qq.com>
Date: Thu, 3 Aug 2023 11:10:43 +0800
Subject: [PATCH 008/122] feat: optimize table merge scan when 1 child table

---
 source/libs/executor/inc/tsort.h        |  2 ++
 source/libs/executor/src/scanoperator.c | 17 +++++++++-----
 source/libs/executor/src/tsort.c        | 30 ++++++++++++++++++++++++-
 3 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h
index 538a9f18f6..57c8bce275 100644
--- a/source/libs/executor/inc/tsort.h
+++ b/source/libs/executor/inc/tsort.h
@@ -191,6 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
 bool tsortIsClosed(SSortHandle* pHandle);
 void tsortSetClosed(SSortHandle* pHandle);
 
+void setSingleTableMerge(SSortHandle* pHandle);
+
 #ifdef __cplusplus
 }
 #endif

diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index da4bd1e23c..399bbbf6d0 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -2938,17 +2938,22 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
   // one table has one data block
   int32_t numOfTable = tableEndIdx - tableStartIdx + 1;
 
-  STableMergeScanSortSourceParam param = {0};
-  param.pOperator = pOperator;
+  STableMergeScanSortSourceParam *param = taosMemoryCalloc(1, sizeof(STableMergeScanSortSourceParam));
+  param->pOperator = pOperator;
 
   STableKeyInfo* startKeyInfo = tableListGetInfo(pInfo->base.pTableListInfo, tableStartIdx);
   pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, &pInfo->base.cond, startKeyInfo, numOfTable, pInfo->pReaderBlock,
                                 (void**)&pInfo->base.dataReader, GET_TASKID(pTaskInfo), false, NULL);
 
   SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
-  ps->param = &param;
-  ps->onlyRef = true;
+  ps->param = param;
+  ps->onlyRef = false;
   tsortAddSource(pInfo->pSortHandle, ps);
 
-  int32_t code = tsortOpen(pInfo->pSortHandle);
+  int32_t code = TSDB_CODE_SUCCESS;
+  if (numOfTable == 1) {
+    setSingleTableMerge(pInfo->pSortHandle);
+  } else {
+    code = tsortOpen(pInfo->pSortHandle);
+  }
 
   if (code != TSDB_CODE_SUCCESS) {
     T_LONG_JMP(pTaskInfo->env, terrno);
@@ -3587,4 +3592,4 @@ static void destoryTableCountScanOperator(void* param) {
   taosArrayDestroy(pTableCountScanInfo->stbUidList);
 
   taosMemoryFreeClear(param);
-}
\ No newline at end of file
+}

diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 0a8d7ee376..b3e562a141 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -69,8 +69,14 @@ struct SSortHandle {
   _sort_fetch_block_fn_t  fetchfp;
   _sort_merge_compar_fn_t comparFn;
   SMultiwayMergeTreeInfo* pMergeTree;
+
+  bool singleTableMerge;
 };
 
+void setSingleTableMerge(SSortHandle* pHandle) {
+  pHandle->singleTableMerge = true;
+}
+
 static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param);
 
 // | offset[0] | offset[1] |....| nullbitmap | data |...|
@@ -1453,6 +1459,26 @@ static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
   return &pHandle->tupleHandle;
 }
 
+static STupleHandle* tsortSingleTableMergeNextTuple(SSortHandle* pHandle) {
+  if (1 == pHandle->numOfCompletedSources) return NULL;
+  if (pHandle->tupleHandle.pBlock && pHandle->tupleHandle.rowIndex + 1 < pHandle->tupleHandle.pBlock->info.rows) {
+    pHandle->tupleHandle.rowIndex++;
+  } else {
+    if (pHandle->tupleHandle.rowIndex == -1) return NULL;
+    SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
+    SSortSource*  source = *pSource;
+    SSDataBlock*  pBlock = pHandle->fetchfp(source->param);
+    if (!pBlock || pBlock->info.rows == 0) {
+      setCurrentSourceDone(source, pHandle);
+      pHandle->tupleHandle.pBlock = NULL;
+      return NULL;
+    }
+    pHandle->tupleHandle.pBlock = pBlock;
+    pHandle->tupleHandle.rowIndex = 0;
+  }
+  return &pHandle->tupleHandle;
+}
+
 int32_t tsortOpen(SSortHandle* pHandle) {
   if (pHandle->opened) {
     return 0;
@@ -1470,7 +1496,9 @@ int32_t tsortOpen(SSortHandle* pHandle) {
 }
 
 STupleHandle* tsortNextTuple(SSortHandle* pHandle) {
-  if (pHandle->pBoundedQueue)
+  if (pHandle->singleTableMerge)
+    return tsortSingleTableMergeNextTuple(pHandle);
+  else if (pHandle->pBoundedQueue)
     return tsortPQSortNextTuple(pHandle);
   else
     return tsortBufMergeSortNextTuple(pHandle);
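The optimization is the degenerate-merge case: when the scanned group contains exactly one child table there is nothing to merge, so the sort handle's open phase (external sort buffers and the k-way merge tree) is skipped entirely and tuples are streamed straight from the lone source. A condensed sketch of the dispatch this patch introduces (pieced together from the two hunks above; error handling elided):

    /* In startGroupTableMergeScan(): pick the fast path for a single table. */
    if (numOfTable == 1) {
      setSingleTableMerge(pInfo->pSortHandle);   /* no tsortOpen, no merge tree */
    } else {
      code = tsortOpen(pInfo->pSortHandle);      /* usual multiway-merge path */
    }

    /* Later, tsortNextTuple() checks the flag first and routes to
     * tsortSingleTableMergeNextTuple(), which just walks the rows of each
     * block fetched from source 0. */
    STupleHandle* tuple = tsortNextTuple(pInfo->pSortHandle);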
From 88d755be76d3b75bcebf9b114363ce0b5b4dcae1 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Fri, 4 Aug 2023 13:31:00 +0800
Subject: [PATCH 009/122] feat(tsdb/cos): s3 migration

---
 cmake/cmake.options                         |    6 +
 cmake/cos_CMakeLists.txt.in                 |   12 +
 contrib/CMakeLists.txt                      |   23 +
 contrib/test/CMakeLists.txt                 |    5 +
 contrib/test/cos/CMakeLists.txt             |   49 +
 contrib/test/cos/main.c                     | 3090 +++++++++++++++++++
 source/common/src/tglobal.c                 |   68 +-
 source/dnode/vnode/CMakeLists.txt           |   28 +-
 source/dnode/vnode/src/inc/vndCos.h         |   36 +
 source/dnode/vnode/src/tsdb/tsdbRetention.c |  120 +-
 source/dnode/vnode/src/vnd/vnodeCos.c       |  114 +
 source/dnode/vnode/src/vnd/vnodeModule.c    |    5 +
 12 files changed, 3530 insertions(+), 26 deletions(-)
 create mode 100644 cmake/cos_CMakeLists.txt.in
 create mode 100644 contrib/test/cos/CMakeLists.txt
 create mode 100644 contrib/test/cos/main.c
 create mode 100644 source/dnode/vnode/src/inc/vndCos.h
 create mode 100644 source/dnode/vnode/src/vnd/vnodeCos.c

diff --git a/cmake/cmake.options b/cmake/cmake.options
index fa0b888415..ea5efcb13a 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -125,6 +125,12 @@ option(
     ON
 )
 
+option(
+    BUILD_WITH_COS
+    "If build with cos"
+    ON
+)
+
 option(
     BUILD_WITH_SQLITE
     "If build with sqlite"

diff --git a/cmake/cos_CMakeLists.txt.in b/cmake/cos_CMakeLists.txt.in
new file mode 100644
index 0000000000..ee1e58b50f
--- /dev/null
+++ b/cmake/cos_CMakeLists.txt.in
@@ -0,0 +1,12 @@
+# cos
+ExternalProject_Add(cos
+    GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
+    GIT_TAG v5.0.16
+    SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
+    BINARY_DIR ""
+    #BUILD_IN_SOURCE TRUE
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND ""
+    INSTALL_COMMAND ""
+    TEST_COMMAND ""
+)

diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index c60fd33b16..df9519d00f 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -122,6 +122,12 @@ if(${BUILD_WITH_SQLITE})
   cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 endif(${BUILD_WITH_SQLITE})
 
+# cos
+if(${BUILD_WITH_COS})
+  cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+  add_definitions(-DUSE_COS)
+endif(${BUILD_WITH_COS})
+
 # lucene
 if(${BUILD_WITH_LUCENE})
   cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -347,6 +353,23 @@ if (${BUILD_WITH_ROCKSDB})
   endif()
 endif()
 
+# cos
+if(${BUILD_WITH_COS})
+  option(ENABLE_TEST "Enable the tests" OFF)
+
+  set(CMAKE_BUILD_TYPE debug)
+  set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
+  set(CMAKE_PROJECT_NAME cos_c_sdk)
+
+  add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
+  target_include_directories(
+    cos_c_sdk
+    PUBLIC $
+  )
+
+  set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
+endif(${BUILD_WITH_COS})
+
 # lucene
 # To support build on ubuntu: sudo apt-get install libboost-all-dev
 if(${BUILD_WITH_LUCENE})

diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt
index f35cf0d13d..1deff5a67e 100644
--- a/contrib/test/CMakeLists.txt
+++ b/contrib/test/CMakeLists.txt
@@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
   add_subdirectory(rocksdb)
 endif(${BUILD_WITH_ROCKSDB})
 
+# cos
+if(${BUILD_WITH_COS})
+  add_subdirectory(cos)
+endif(${BUILD_WITH_COS})
+
 if(${BUILD_WITH_LUCENE})
   add_subdirectory(lucene)
 endif(${BUILD_WITH_LUCENE})

diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt
new file mode 100644
index 0000000000..77c57e5a65
--- /dev/null
+++ b/contrib/test/cos/CMakeLists.txt
@@ -0,0 +1,49 @@
+add_executable(cosTest "")
+target_sources(cosTest
+    PRIVATE
+    "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
+    )
+
+#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
+#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
+#find_path(MINIXML_INCLUDE_DIR mxml.h)
+#find_path(CURL_INCLUDE_DIR curl/curl.h)
+
+#include_directories (${MINIXML_INCLUDE_DIR})
+#include_directories (${CURL_INCLUDE_DIR})
+FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
+#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
+
+IF (APR_CONFIG_BIN)
+    EXECUTE_PROCESS(
+        COMMAND ${APR_CONFIG_BIN} --includedir
+        OUTPUT_VARIABLE APR_INCLUDE_DIR
+        OUTPUT_STRIP_TRAILING_WHITESPACE
+    )
+ENDIF()
+#IF (APU_CONFIG_BIN)
+#    EXECUTE_PROCESS(
+#        COMMAND ${APU_CONFIG_BIN} --includedir
+#        OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
+#        OUTPUT_STRIP_TRAILING_WHITESPACE
+#    )
+#ENDIF()
+
+include_directories (${APR_INCLUDE_DIR})
+#include_directories (${APR_UTIL_INCLUDE_DIR})
+
+target_include_directories(
+    cosTest
+    PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
+    )
+
+find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
+find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
+find_library(MINIXML_LIBRARY mxml)
+find_library(CURL_LIBRARY curl)
+
+target_link_libraries(cosTest cos_c_sdk)
+target_link_libraries(cosTest ${APR_UTIL_LIBRARY})
+target_link_libraries(cosTest ${APR_LIBRARY})
+target_link_libraries(cosTest ${MINIXML_LIBRARY})
+target_link_libraries(cosTest ${CURL_LIBRARY})
diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c
new file mode 100644
index 0000000000..faaceee2e3
--- /dev/null
+++ b/contrib/test/cos/main.c
@@ -0,0 +1,3090 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#include "cos_api.h"
+#include "cos_http_io.h"
+#include "cos_log.h"
+
+// endpoint is the COS access domain; see https://cloud.tencent.com/document/product/436/6224
+// static char TEST_COS_ENDPOINT[] = "cos.ap-guangzhou.myqcloud.com";
+// static char TEST_COS_ENDPOINT[] = "http://oss-cn-beijing.aliyuncs.com";
+static char TEST_COS_ENDPOINT[] = "http://cos.ap-beijing.myqcloud.com";
+// the CI (Cloud Infinite) access domain; see https://cloud.tencent.com/document/product/460/31066
+static char TEST_CI_ENDPOINT[] = "https://ci.ap-guangzhou.myqcloud.com";
+// the developer's project SecretId/SecretKey, available at https://console.cloud.tencent.com/cam/capi
+static char *TEST_ACCESS_KEY_ID;      // your secret_id
+static char *TEST_ACCESS_KEY_SECRET;  // your secret_key
+// APPID: the unique user-level resource identifier for COS access, available at
+// https://console.cloud.tencent.com/cam/capi
+// static char TEST_APPID[] = "";     // your appid
+// static char TEST_APPID[] = "119";  // your appid
+static char TEST_APPID[] = "1309024725";  // your appid
+// the cos bucket name, syntax: [bucket]-[appid], for example: mybucket-1253666666; it can be viewed at
+// https://console.cloud.tencent.com/cos5/bucket  static char TEST_BUCKET_NAME[] = "";
+// static char TEST_BUCKET_NAME[] = "";
+// static char TEST_BUCKET_NAME[] = "test-bucket-119";
+static char TEST_BUCKET_NAME[] = "test0711-1309024725";
+// object owner, e.g. user UIN:100000000001
+static char TEST_UIN[] = "";  // your uin
+// region info; for the list of values see https://cloud.tencent.com/document/product/436/6224,
+// e.g. ap-beijing, ap-hongkong, eu-frankfurt
+static char TEST_REGION[] = "ap-guangzhou";  // region in endpoint
+// object key: the unique identifier of an object in a bucket; for more on objects and object
+// keys see https://cloud.tencent.com/document/product/436/13324
+static char TEST_OBJECT_NAME1[] = "1.txt";
+static char TEST_OBJECT_NAME2[] = "test2.dat";
+static char TEST_OBJECT_NAME3[] = "test3.dat";
+static char TEST_OBJECT_NAME4[] = "multipart.txt";
+// static char TEST_DOWNLOAD_NAME2[] = "download_test2.dat";
+static char *TEST_APPEND_NAMES[] = {"test.7z.001", "test.7z.002"};
+static char TEST_DOWNLOAD_NAME3[] = "download_test3.dat";
+static char TEST_MULTIPART_OBJECT[] = "multipart.dat";
+static char TEST_DOWNLOAD_NAME4[] = "multipart_download.dat";
+static char TEST_MULTIPART_FILE[] = "test.zip";
+// static char TEST_MULTIPART_OBJECT2[] = "multipart2.dat";
+static char TEST_MULTIPART_OBJECT3[] = "multipart3.dat";
+static char TEST_MULTIPART_OBJECT4[] = "multipart4.dat";
+
+static void print_headers(cos_table_t *headers) {
+  const cos_array_header_t *tarr;
+  const cos_table_entry_t *telts;
+  int i = 0;
+
+  if (apr_is_empty_table(headers)) {
+    return;
+  }
+
+  tarr = cos_table_elts(headers);
+  telts = (cos_table_entry_t *)tarr->elts;
+
+  printf("headers:\n");
+  for (; i < tarr->nelts; i++) {
+    telts = (cos_table_entry_t *)(tarr->elts + i * tarr->elt_size);
+    printf("%s: %s\n", telts->key, telts->val);
+  }
+}
+
+void init_test_config(cos_config_t *config, int is_cname) {
+  cos_str_set(&config->endpoint, TEST_COS_ENDPOINT);
+  cos_str_set(&config->access_key_id, TEST_ACCESS_KEY_ID);
+  cos_str_set(&config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+  cos_str_set(&config->appid, TEST_APPID);
+  config->is_cname = is_cname;
+}
+
+void init_test_request_options(cos_request_options_t *options, int is_cname) {
+  options->config = cos_config_create(options->pool);
+  init_test_config(options->config, is_cname);
+  options->ctl = cos_http_controller_create(options->pool, 0);
+}
+
+void log_status(cos_status_t *s) {
+  cos_warn_log("status->code: %d", s->code);
+  if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code);
+  if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg);
+  if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id);
+}
+
+void test_sign() {
+  cos_pool_t *p = NULL;
+  const unsigned char secret_key[] = "your secret_key";
+  const unsigned char time_str[] = "1480932292;1481012292";
+  unsigned char sign_key[40];
+  cos_buf_t *fmt_str;
+  const char *value = NULL;
+  const char *uri = "/testfile";
+  const char *host = "testbucket-125000000.cn-north.myqcloud.com&range=bytes%3d0-3";
+  unsigned char fmt_str_hex[40];
+
+  cos_pool_create(&p, NULL);
+  fmt_str = cos_create_buf(p, 1024);
+
+  cos_get_hmac_sha1_hexdigest(sign_key, secret_key, sizeof(secret_key) - 1, time_str, sizeof(time_str) - 1);
+  char *pstr = apr_pstrndup(p, (char *)sign_key, sizeof(sign_key));
+  cos_warn_log("sign_key: %s", pstr);
+
+  // method
+  value = "get";
+  cos_buf_append_string(p, fmt_str, value, strlen(value));
+  cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1);
+
+  // canonicalized resource(URI)
+  cos_buf_append_string(p, fmt_str, uri, strlen(uri));
+  cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1);
+
+  // query-parameters
+  cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1);
+
+  // Host
+  cos_buf_append_string(p, fmt_str, "host=", sizeof("host=") - 1);
+  cos_buf_append_string(p, fmt_str, host, strlen(host));
+  cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1);
+
+  char *pstr3 = apr_pstrndup(p, (char *)fmt_str->pos, cos_buf_size(fmt_str));
+  cos_warn_log("Format string: %s", pstr3);
+
+  // Format-String sha1hash
+  cos_get_sha1_hexdigest(fmt_str_hex, (unsigned char *)fmt_str->pos, cos_buf_size(fmt_str));
+
+  char *pstr2 = apr_pstrndup(p, (char *)fmt_str_hex, sizeof(fmt_str_hex));
+  cos_warn_log("Format string sha1hash: %s", pstr2);
+
+  cos_pool_destroy(p);
+}
+
+void test_bucket() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_acl_e cos_acl = COS_ACL_PRIVATE;
+  cos_string_t bucket;
+  cos_table_t *resp_headers = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // create test bucket
+  s = cos_create_bucket(options, &bucket, cos_acl, &resp_headers);
+  log_status(s);
+
+  // list object (get bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  cos_str_set(&list_params->encoding_type, "url");
+  s = cos_list_object(options, &bucket, list_params, &resp_headers);
+  log_status(s);
+  cos_list_object_content_t *content = NULL;
+  char *line = NULL;
+  cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+    line = apr_psprintf(p, "%.*s\t%.*s\t%.*s\n", content->key.len, content->key.data, content->size.len,
+                        content->size.data, content->last_modified.len, content->last_modified.data);
+    printf("%s", line);
+    printf("next marker: %s\n", list_params->next_marker.data);
+  }
+  cos_list_object_common_prefix_t *common_prefix = NULL;
+  cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) {
+    printf("common prefix: %s\n", common_prefix->prefix.data);
+  }
+
+  // delete bucket
+  s = cos_delete_bucket(options, &bucket, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_list_objects() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers = NULL;
+
+  // create memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // list objects
+  cos_list_object_params_t *list_params = NULL;
+  cos_list_object_content_t *content = NULL;
+  list_params = cos_create_list_object_params(p);
+  s = cos_list_object(options, &bucket, list_params, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("list object succeeded\n");
+    cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+      printf("object: %.*s\n", content->key.len, content->key.data);
+    }
+  } else {
+    printf("list object failed\n");
+  }
+
+  // destroy memory pool
+  cos_pool_destroy(p);
+}
+
+void test_bucket_lifecycle() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  cos_list_t rule_list;
+  cos_list_init(&rule_list);
+  cos_lifecycle_rule_content_t *rule_content = NULL;
+
+  rule_content = cos_create_lifecycle_rule_content(p);
+  cos_str_set(&rule_content->id, "testrule1");
+  cos_str_set(&rule_content->prefix, "abc/");
+  cos_str_set(&rule_content->status, "Enabled");
+  rule_content->expire.days = 365;
+  cos_list_add_tail(&rule_content->node, &rule_list);
+
+  rule_content = cos_create_lifecycle_rule_content(p);
+  cos_str_set(&rule_content->id, "testrule2");
+  cos_str_set(&rule_content->prefix, "efg/");
+  cos_str_set(&rule_content->status, "Disabled");
+  cos_str_set(&rule_content->transition.storage_class, "Standard_IA");
+  rule_content->transition.days = 999;
+  cos_list_add_tail(&rule_content->node, &rule_list);
+
+  rule_content = cos_create_lifecycle_rule_content(p);
+  cos_str_set(&rule_content->id, "testrule3");
+  cos_str_set(&rule_content->prefix, "xxx/");
+  cos_str_set(&rule_content->status, "Enabled");
+  rule_content->abort.days = 1;
+  cos_list_add_tail(&rule_content->node, &rule_list);
+
+  s = cos_put_bucket_lifecycle(options, &bucket, &rule_list, &resp_headers);
+  log_status(s);
+
+  cos_list_t rule_list_ret;
+  cos_list_init(&rule_list_ret);
+  s = cos_get_bucket_lifecycle(options, &bucket, &rule_list_ret, &resp_headers);
+  log_status(s);
+
+  s = cos_delete_bucket_lifecycle(options, &bucket, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_put_object_with_limit() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+  cos_table_t *headers = NULL;
+
+  // create memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // the rate limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s; out-of-range values return a 400 error
+  headers = cos_table_make(p, 1);
+  cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+
+  // upload object
+  cos_str_set(&file, "test_file.bin");
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("put object succeeded\n");
+  } else {
+    printf("put object failed\n");
+  }
+
+  // destroy memory pool
+  cos_pool_destroy(p);
+}
+
+void test_get_object_with_limit() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+  cos_table_t *headers = NULL;
+
+  // create memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // the rate limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s; out-of-range values return a 400 error
+  headers = cos_table_make(p, 1);
+  cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+
+  // download object
+  cos_str_set(&file, "test_file.bin");
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("get object succeeded\n");
+  } else {
+    printf("get object failed\n");
+  }
+
+  // destroy memory pool
+  cos_pool_destroy(p);
+}
+
+void test_gen_object_url() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  printf("url:%s\n", cos_gen_object_url(options, &bucket, &object));
+
+  cos_pool_destroy(p);
+}
+
+void test_create_dir() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_list_t buffer;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, "folder/");
+
+  // create the folder (upload an empty object whose key ends with '/')
+  cos_list_init(&buffer);
+  s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("put object succeeded\n");
+  } else {
+    printf("put object failed\n");
+  }
+  cos_pool_destroy(p);
+}
+
+void test_object() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_list_t buffer;
+  cos_buf_t *content = NULL;
+  char *str = "This is my test data.";
+  cos_string_t file;
+  int traffic_limit = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  cos_list_init(&buffer);
+  content = cos_buf_pack(options->pool, str, strlen(str));
+  cos_list_add_tail(&content->node, &buffer);
+  s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers);
+  log_status(s);
+
+  cos_list_t download_buffer;
+  cos_list_init(&download_buffer);
+  if (traffic_limit) {
+    // the rate limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s; out-of-range values return a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+  s = cos_get_object_to_buffer(options, &bucket, &object, headers, NULL, &download_buffer, &resp_headers);
+  log_status(s);
+  print_headers(resp_headers);
+  int64_t len = 0;
+  int64_t size = 0;
+  int64_t pos = 0;
+  cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); }
+  char *buf = cos_pcalloc(p, (apr_size_t)(len + 1));
+  buf[len] = '\0';
+  cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) {
+    size = cos_buf_size(content);
+    memcpy(buf + pos, content->pos, (size_t)size);
+    pos += size;
+  }
+  cos_warn_log("Download data=%s", buf);
+
+  cos_str_set(&file, TEST_OBJECT_NAME4);
+  cos_str_set(&object, TEST_OBJECT_NAME4);
+  s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&file, TEST_DOWNLOAD_NAME3);
+  cos_str_set(&object, TEST_OBJECT_NAME3);
+  s = cos_get_object_to_file(options, &bucket, &object, NULL, NULL, &file, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&object, TEST_OBJECT_NAME2);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_delete_object(options, &bucket, &object, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&object, TEST_OBJECT_NAME3);
+  s = cos_delete_object(options, &bucket, &object, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_append_object() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+
+  // create memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // append-upload the object; resume from the current length on each pass
+  cos_str_set(&object, TEST_OBJECT_NAME3);
+  int32_t count = sizeof(TEST_APPEND_NAMES) / sizeof(char *);
+  int32_t index = 0;
+  int64_t position = 0;
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  if (s->code == 200) {
+    char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+    if (content_length_str != NULL) {
+      position = atol(content_length_str);
+    }
+  }
+  for (; index < count; index++) {
+    cos_str_set(&file, TEST_APPEND_NAMES[index]);
+    s = cos_append_object_from_file(options, &bucket, &object, position, &file, NULL, &resp_headers);
+    log_status(s);
+
+    s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+    if (s->code == 200) {
+      char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+      if (content_length_str != NULL) {
+        position = atol(content_length_str);
+      }
+    }
+  }
+
+  // destroy memory pool
+  cos_pool_destroy(p);
+}
+
+void test_head_object() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers = NULL;
+
+  // create memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // get object metadata
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  print_headers(resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("head object succeeded\n");
+  } else {
+    printf("head object failed\n");
+  }
+
+  // destroy memory pool
+  cos_pool_destroy(p);
+}
+
+void test_check_object_exist() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_object_exist_status_e object_exist;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  // check whether the object exists
+  s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers);
+  if (object_exist == COS_OBJECT_NON_EXIST) {
+    printf("object: %.*s non exist.\n", object.len, object.data);
+  } else if (object_exist == COS_OBJECT_EXIST) {
+    printf("object: %.*s exist.\n", object.len, object.data);
+  } else {
+    printf("object: %.*s unknown status.\n", object.len, object.data);
+    log_status(s);
+  }
+
+  cos_pool_destroy(p);
+}
+
+void test_object_restore() {
+  cos_pool_t *p = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  int is_cname = 0;
+  cos_table_t *resp_headers = NULL;
+  cos_request_options_t *options = NULL;
+  cos_status_t *s = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, "test_restore.dat");
+
+  cos_object_restore_params_t *restore_params = cos_create_object_restore_params(p);
+  restore_params->days = 30;
+  cos_str_set(&restore_params->tier, "Standard");
+  s = cos_post_object_restore(options, &bucket, &object, restore_params, NULL, NULL, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void progress_callback(int64_t consumed_bytes, int64_t total_bytes) {
+  printf("consumed_bytes = %" APR_INT64_T_FMT ", total_bytes = %" APR_INT64_T_FMT "\n", consumed_bytes, total_bytes);
+}
+
+void test_put_object_from_file() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_string_t file;
+  int traffic_limit = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_table_t *headers = NULL;
+  if (traffic_limit) {
+    // the rate limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s; out-of-range values return a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&file, TEST_OBJECT_NAME4);
+  cos_str_set(&object, TEST_OBJECT_NAME4);
+  s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_put_object_from_file_with_sse() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_string_t file;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_table_t *headers = NULL;
+  headers = cos_table_make(p, 3);
+  // apr_table_add(headers, "x-cos-server-side-encryption", "AES256");
+  apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256");
+  apr_table_add(headers, "x-cos-server-side-encryption-customer-key", "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=");
+  apr_table_add(headers, "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g==");
+
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&file, "/home/jojoliang/data/test.jpg");
+  cos_str_set(&object, "pic");
+
+  s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers);
+  log_status(s);
+  {
+    int i = 0;
+    apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers);
+    for (; i < pp->nelts; i++) {
+      apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i;
+      printf("%s: %s\n", ele->key, ele->val);
+    }
+  }
+
+  cos_pool_destroy(p);
+}
+
+void test_get_object_to_file_with_sse() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_string_t file;
+  cos_table_t *headers = NULL;
+  cos_table_t *params = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  headers = cos_table_make(p, 3);
+  /*
+  apr_table_add(headers, "x-cos-server-side-encryption", "AES256");
+  */
+  /*
+  apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256");
+  apr_table_add(headers, "x-cos-server-side-encryption-customer-key",
+                "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY=");
+  apr_table_add(headers, "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g==");
+  */
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&file, "getfile");
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  s = cos_get_object_to_file(options, &bucket, &object, headers, params, &file, &resp_headers);
+  log_status(s);
+
+  {
+    int i = 0;
+    apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers);
+    for (; i < pp->nelts; i++) {
+      apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i;
+      printf("%s: %s\n", ele->key, ele->val);
+    }
+  }
+
+  cos_pool_destroy(p);
+}
+
+void multipart_upload_file_from_file() {
+  cos_pool_t *p = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  int is_cname = 0;
+  cos_table_t *headers = NULL;
+  cos_table_t *complete_headers = NULL;
+  cos_table_t *resp_headers = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t upload_id;
+  cos_upload_file_t *upload_file = NULL;
+  cos_status_t *s = NULL;
+  cos_list_upload_part_params_t *params = NULL;
+  cos_list_t complete_part_list;
+  cos_list_part_content_t *part_content = NULL;
+  cos_complete_part_content_t *complete_part_content = NULL;
+  int part_num = 1;
+  int64_t pos = 0;
+  int64_t file_length = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  headers = cos_table_make(p, 1);
+  complete_headers = cos_table_make(p, 1);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_MULTIPART_OBJECT);
+
+  // init multipart
+  s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data);
+  } else {
+    printf("Init multipart upload failed\n");
+    cos_pool_destroy(p);
+    return;
+  }
+
+  // upload part from file
+  int res = COSE_OK;
+  cos_file_buf_t *fb = cos_create_file_buf(p);
+  res = cos_open_file_for_all_read(p, TEST_MULTIPART_FILE, fb);
+  if (res != COSE_OK) {
+    cos_error_log("Open read file fail, filename:%s\n", TEST_MULTIPART_FILE);
+    return;
+  }
+  file_length = fb->file_last;
+  apr_file_close(fb->file);
+  while (pos < file_length) {
+    upload_file = cos_create_upload_file(p);
+    cos_str_set(&upload_file->filename, TEST_MULTIPART_FILE);
+    upload_file->file_pos = pos;
+    pos += 2 * 1024 * 1024;
+    upload_file->file_last = pos < file_length ? pos : file_length;  // 2MB
+    s = cos_upload_part_from_file(options, &bucket, &object, &upload_id, part_num++, upload_file, &resp_headers);
+
+    if (cos_status_is_ok(s)) {
+      printf("Multipart upload part from file succeeded\n");
+    } else {
+      printf("Multipart upload part from file failed\n");
+    }
+  }
+
+  // list part
+  params = cos_create_list_upload_part_params(p);
+  params->max_ret = 1000;
+  cos_list_init(&complete_part_list);
+  s = cos_list_upload_part(options, &bucket, &object, &upload_id, params, &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("List multipart succeeded\n");
+    cos_list_for_each_entry(cos_list_part_content_t, part_content, &params->part_list, node) {
+      printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data,
+             part_content->size.data, part_content->last_modified.data, part_content->etag.data);
+    }
+  } else {
+    printf("List multipart failed\n");
+    cos_pool_destroy(p);
+    return;
+  }
+
+  cos_list_for_each_entry(cos_list_part_content_t, part_content, &params->part_list, node) {
+    complete_part_content = cos_create_complete_part_content(p);
+    cos_str_set(&complete_part_content->part_number, part_content->part_number.data);
+    cos_str_set(&complete_part_content->etag, part_content->etag.data);
+    cos_list_add_tail(&complete_part_content->node, &complete_part_list);
+  }
+
+  // complete multipart
+  s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers,
+                                    &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data);
+  } else {
+    printf("Complete multipart upload from file failed\n");
+  }
+
+  cos_pool_destroy(p);
+}
/* + //list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, + params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", + part_content->part_number.data, + part_content->size.data, + part_content->last_modified.data, + part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + */ + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void abort_multipart_upload() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + s = cos_abort_multipart_upload(options, &bucket, &object, &upload_id, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Abort multipart upload succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Abort multipart upload failed\n"); + } + + cos_pool_destroy(p); +} + +void list_multipart() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + cos_list_multipart_upload_params_t *list_multipart_params = NULL; + cos_list_upload_part_params_t *list_upload_param = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + list_multipart_params = cos_create_list_multipart_upload_params(p); + list_multipart_params->max_ret = 999; + s = cos_list_multipart_upload(options, &bucket, list_multipart_params, &resp_headers); + log_status(s); + + list_upload_param = cos_create_list_upload_part_params(p); + list_upload_param->max_ret = 1000; + cos_string_t upload_id; + cos_str_set(&upload_id, 
"149373379126aee264fecbf5fe8ddb8b9cd23b76c73ab1af0bcfd50683cc4254f81ebe2386"); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, list_upload_param, &resp_headers); + if (cos_status_is_ok(s)) { + printf("List upload part succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + cos_list_part_content_t *part_content = NULL; + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_param->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List upload part failed\n"); + } + + cos_pool_destroy(p); +} + +void test_resumable() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filepath; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filepath, TEST_DOWNLOAD_NAME4); + + clt_params = cos_create_resumable_clt_params_content(p, 5 * 1024 * 1024, 3, COS_FALSE, NULL); + s = cos_resumable_download_file(options, &bucket, &object, &filepath, NULL, NULL, clt_params, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_resumable_upload_with_multi_threads() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filename; + cos_status_t *s = NULL; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filename, TEST_MULTIPART_FILE); + + // upload + clt_params = cos_create_resumable_clt_params_content(p, 1024 * 1024, 8, COS_FALSE, NULL); + s = cos_resumable_upload_file(options, &bucket, &object, &filename, headers, NULL, clt_params, NULL, &resp_headers, + NULL); + + if (cos_status_is_ok(s)) { + printf("upload succeeded\n"); + } else { + printf("upload failed\n"); + } + + cos_pool_destroy(p); +} + +void test_delete_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + char *object_name1 = TEST_OBJECT_NAME2; + char *object_name2 = TEST_OBJECT_NAME3; + cos_object_key_t *content1 = NULL; + cos_object_key_t *content2 = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + content1 = cos_create_cos_object_key(p); + cos_str_set(&content1->key, object_name1); + cos_list_add_tail(&content1->node, &object_list); + content2 = cos_create_cos_object_key(p); + cos_str_set(&content2->key, object_name2); + cos_list_add_tail(&content2->node, &object_list); + + s = cos_delete_objects(options, &bucket, 
&object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + printf("delete objects succeeded\n"); + } else { + printf("delete objects failed\n"); + } +} + +void test_delete_objects_by_prefix() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_string_t prefix; + char *prefix_str = ""; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&prefix, prefix_str); + + s = cos_delete_objects_by_prefix(options, &bucket, &prefix); + log_status(s); + cos_pool_destroy(p); + + printf("test_delete_object_by_prefix ok\n"); +} + +void test_acl() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.txt"); + + // put acl + cos_string_t read; + cos_str_set(&read, "id=\"qcs::cam::uin/12345:uin/12345\", id=\"qcs::cam::uin/45678:uin/45678\""); + s = cos_put_bucket_acl(options, &bucket, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params = NULL; + acl_params = cos_create_acl_params(p); + s = cos_get_bucket_acl(options, &bucket, acl_params, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params->owner_id.data, acl_params->owner_name.data); + cos_acl_grantee_content_t *acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params->grantee_list, node) { + printf("acl grantee type:%s, id:%s, name:%s, permission:%s\n", acl_content->type.data, acl_content->id.data, + acl_content->name.data, acl_content->permission.data); + } + + // put acl + s = cos_put_object_acl(options, &bucket, &object, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params2 = NULL; + acl_params2 = cos_create_acl_params(p); + s = cos_get_object_acl(options, &bucket, &object, acl_params2, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params2->owner_id.data, acl_params2->owner_name.data); + acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params2->grantee_list, node) { + printf("acl grantee id:%s, name:%s, permission:%s\n", acl_content->id.data, acl_content->name.data, + acl_content->permission.data); + } + + cos_pool_destroy(p); +} + +void test_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //设置对象复制 + cos_str_set(&object, TEST_OBJECT_NAME2); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); + + cos_copy_object_params_t *params = NULL; + params 
+
+void test_copy() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t src_bucket;
+    cos_string_t src_object;
+    cos_string_t src_endpoint;
+    cos_table_t *resp_headers = NULL;
+
+    // create a memory pool
+    cos_pool_create(&p, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // set up the object copy
+    cos_str_set(&object, TEST_OBJECT_NAME2);
+    cos_str_set(&src_bucket, TEST_BUCKET_NAME);
+    cos_str_set(&src_endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&src_object, TEST_OBJECT_NAME1);
+
+    cos_copy_object_params_t *params = NULL;
+    params = cos_create_copy_object_params(p);
+    s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers);
+    if (cos_status_is_ok(s)) {
+        printf("put object copy succeeded\n");
+    } else {
+        printf("put object copy failed\n");
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_modify_storage_class() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t src_bucket;
+    cos_string_t src_object;
+    cos_string_t src_endpoint;
+    cos_table_t *resp_headers = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, TEST_OBJECT_NAME1);
+    cos_str_set(&src_bucket, TEST_BUCKET_NAME);
+    cos_str_set(&src_endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&src_object, TEST_OBJECT_NAME1);
+
+    // Set the x-cos-metadata-directive and x-cos-storage-class headers (replace with the storage class you need).
+    cos_table_t *headers = cos_table_make(p, 2);
+    apr_table_add(headers, "x-cos-metadata-directive", "Replaced");
+    // Storage classes include INTELLIGENT_TIERING, MAZ_INTELLIGENT_TIERING, STANDARD_IA, ARCHIVE and DEEP_ARCHIVE.
+    apr_table_add(headers, "x-cos-storage-class", "ARCHIVE");
+
+    cos_copy_object_params_t *params = NULL;
+    params = cos_create_copy_object_params(p);
+    s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, headers, params,
+                        &resp_headers);
+    log_status(s);
+
+    cos_pool_destroy(p);
+}
+
+void test_copy_mt() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t src_bucket;
+    cos_string_t src_object;
+    cos_string_t src_endpoint;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test_copy.txt");
+    cos_str_set(&src_bucket, "mybucket-1253685564");
+    cos_str_set(&src_endpoint, "cn-south.myqcloud.com");
+    cos_str_set(&src_object, "test.txt");
+
+    s = cos_upload_object_by_part_copy_mt(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object,
+                                          1024 * 1024, 8, NULL);
+    log_status(s);
+
+    cos_pool_destroy(p);
+}
+
+void test_copy_with_part_copy() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t copy_source;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test_copy.txt");
+    cos_str_set(&copy_source, "mybucket-1253685564.cn-south.myqcloud.com/test.txt");
+
+    s = cos_upload_object_by_part_copy(options, &copy_source, &bucket, &object, 1024 * 1024);
+    log_status(s);
+
+    cos_pool_destroy(p);
+}
+
+void make_rand_string(cos_pool_t *p, int len, cos_string_t *data) {
+    char *str = NULL;
+    int i = 0;
+    str = (char *)cos_palloc(p, len + 1);
+    for (; i < len; i++) {
+        str[i] = 'a' + rand() % 26;  /* keep characters within 'a'..'z' */
+    }
+    str[len] = '\0';
+    cos_str_set(data, str);
+}
+
+unsigned long get_file_size(const char *file_path) {
+    unsigned long filesize = -1;
+    struct stat statbuff;
+
+    if (stat(file_path, &statbuff) < 0) {
+        return filesize;
+    } else {
+        filesize = statbuff.st_size;
+    }
+
+    return filesize;
+}
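+
+/*
+ * Illustrative helper (added, not part of the original demo): byte-compare two
+ * local files. test_part_copy() below only compares file sizes after the
+ * download; a sketch like this could be used for a stronger equality check.
+ * Plain C stdio only, no SDK calls.
+ */
+static int files_equal(const char *path_a, const char *path_b) {
+    FILE *fa = fopen(path_a, "rb");
+    FILE *fb = fopen(path_b, "rb");
+    int equal = (fa != NULL && fb != NULL);
+    while (equal) {
+        int ca = fgetc(fa);
+        int cb = fgetc(fb);
+        if (ca != cb) {
+            equal = 0;  /* mismatched byte, or one file ended early */
+        }
+        if (ca == EOF || cb == EOF) {
+            break;      /* both streams exhausted together when still equal */
+        }
+    }
+    if (fa != NULL) fclose(fa);
+    if (fb != NULL) fclose(fb);
+    return equal;
+}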
+
+void test_part_copy() {
+    cos_pool_t *p = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t file;
+    int is_cname = 0;
+    cos_string_t upload_id;
+    cos_list_upload_part_params_t *list_upload_part_params = NULL;
+    cos_upload_part_copy_params_t *upload_part_copy_params1 = NULL;
+    cos_upload_part_copy_params_t *upload_part_copy_params2 = NULL;
+    cos_table_t *headers = NULL;
+    cos_table_t *query_params = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_table_t *list_part_resp_headers = NULL;
+    cos_list_t complete_part_list;
+    cos_list_part_content_t *part_content = NULL;
+    cos_complete_part_content_t *complete_content = NULL;
+    cos_table_t *complete_resp_headers = NULL;
+    cos_status_t *s = NULL;
+    int part1 = 1;
+    int part2 = 2;
+    char *local_filename = "test_upload_part_copy.file";
+    char *download_filename = "test_upload_part_copy.file.download";
+    char *source_object_name = "cos_test_upload_part_copy_source_object";
+    char *dest_object_name = "cos_test_upload_part_copy_dest_object";
+    FILE *fd = NULL;
+    cos_string_t download_file;
+    cos_string_t dest_bucket;
+    cos_string_t dest_object;
+    int64_t range_start1 = 0;
+    int64_t range_end1 = 6000000;
+    int64_t range_start2 = 6000001;
+    int64_t range_end2;
+    cos_string_t data;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+
+    // create the local file used by the multipart copy
+    make_rand_string(p, 10 * 1024 * 1024, &data);
+    fd = fopen(local_filename, "w");
+    fwrite(data.data, sizeof(data.data[0]), data.len, fd);
+    fclose(fd);
+
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, source_object_name);
+    cos_str_set(&file, local_filename);
+    s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers);
+    log_status(s);
+
+    // init multipart upload
+    cos_str_set(&object, dest_object_name);
+    s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, NULL, &resp_headers);
+    log_status(s);
+
+    // upload part copy 1
+    upload_part_copy_params1 = cos_create_upload_part_copy_params(p);
+    cos_str_set(&upload_part_copy_params1->copy_source,
+                "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object");
+    cos_str_set(&upload_part_copy_params1->dest_bucket, TEST_BUCKET_NAME);
+    cos_str_set(&upload_part_copy_params1->dest_object, dest_object_name);
+    cos_str_set(&upload_part_copy_params1->upload_id, upload_id.data);
+    upload_part_copy_params1->part_num = part1;
+    upload_part_copy_params1->range_start = range_start1;
+    upload_part_copy_params1->range_end = range_end1;
+    headers = cos_table_make(p, 0);
+    s = cos_upload_part_copy(options, upload_part_copy_params1, headers, &resp_headers);
+    log_status(s);
+    printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data,
+           upload_part_copy_params1->rsp_content->etag.data);
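+
+    /*
+     * Note (added): the copy is split into two ranges over the ~10 MB source:
+     * part 1 covers bytes [0, 6000000] and part 2 covers [6000001, size - 1].
+     * Range bounds are inclusive, and every part except the last one has to
+     * meet the multipart minimum part size (1 MB).
+     */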
+    // upload part copy 2
+    resp_headers = NULL;
+    range_end2 = get_file_size(local_filename) - 1;
+    upload_part_copy_params2 = cos_create_upload_part_copy_params(p);
+    cos_str_set(&upload_part_copy_params2->copy_source,
+                "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object");
+    cos_str_set(&upload_part_copy_params2->dest_bucket, TEST_BUCKET_NAME);
+    cos_str_set(&upload_part_copy_params2->dest_object, dest_object_name);
+    cos_str_set(&upload_part_copy_params2->upload_id, upload_id.data);
+    upload_part_copy_params2->part_num = part2;
+    upload_part_copy_params2->range_start = range_start2;
+    upload_part_copy_params2->range_end = range_end2;
+    headers = cos_table_make(p, 0);
+    s = cos_upload_part_copy(options, upload_part_copy_params2, headers, &resp_headers);
+    log_status(s);
+    printf("last modified:%s, etag:%s\n", upload_part_copy_params2->rsp_content->last_modify.data,
+           upload_part_copy_params2->rsp_content->etag.data);
+
+    // list parts
+    list_upload_part_params = cos_create_list_upload_part_params(p);
+    list_upload_part_params->max_ret = 10;
+    cos_list_init(&complete_part_list);
+
+    cos_str_set(&dest_bucket, TEST_BUCKET_NAME);
+    cos_str_set(&dest_object, dest_object_name);
+    s = cos_list_upload_part(options, &dest_bucket, &dest_object, &upload_id, list_upload_part_params,
+                             &list_part_resp_headers);
+    log_status(s);
+    cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_part_params->part_list, node) {
+        complete_content = cos_create_complete_part_content(p);
+        cos_str_set(&complete_content->part_number, part_content->part_number.data);
+        cos_str_set(&complete_content->etag, part_content->etag.data);
+        cos_list_add_tail(&complete_content->node, &complete_part_list);
+    }
+
+    // complete multipart upload
+    headers = cos_table_make(p, 0);
+    s = cos_complete_multipart_upload(options, &dest_bucket, &dest_object, &upload_id, &complete_part_list, headers,
+                                      &complete_resp_headers);
+    log_status(s);
+
+    // check that the copied object's size matches the local file
+    headers = cos_table_make(p, 0);
+    cos_str_set(&download_file, download_filename);
+    s = cos_get_object_to_file(options, &dest_bucket, &dest_object, headers, query_params, &download_file, &resp_headers);
+    log_status(s);
+    printf("local file len = %" APR_INT64_T_FMT ", download file len = %" APR_INT64_T_FMT "\n",
+           (apr_int64_t)get_file_size(local_filename), (apr_int64_t)get_file_size(download_filename));
+    remove(download_filename);
+    remove(local_filename);
+    cos_pool_destroy(p);
+
+    printf("test part copy ok\n");
+}
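+
+/*
+ * Illustrative sketch (added, not part of the original demo): when
+ * cos_complete_multipart_upload() fails, the pending upload can be aborted so
+ * its parts stop occupying storage. The cos_abort_multipart_upload() call and
+ * its argument pattern are an assumption modeled on the other multipart calls
+ * above, not something this demo exercises.
+ */
+static void abort_part_copy_on_failure(cos_request_options_t *options, cos_string_t *bucket,
+                                       cos_string_t *object, cos_string_t *upload_id,
+                                       cos_status_t *complete_status) {
+    cos_table_t *resp_headers = NULL;
+    if (!cos_status_is_ok(complete_status)) {
+        cos_status_t *s = cos_abort_multipart_upload(options, bucket, object, upload_id, &resp_headers);
+        log_status(s);
+    }
+}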
+
+void test_cors() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    cos_list_t rule_list;
+    cos_list_init(&rule_list);
+    cos_cors_rule_content_t *rule_content = NULL;
+
+    rule_content = cos_create_cors_rule_content(p);
+    cos_str_set(&rule_content->id, "testrule1");
+    cos_str_set(&rule_content->allowed_origin, "http://www.qq1.com");
+    cos_str_set(&rule_content->allowed_method, "GET");
+    cos_str_set(&rule_content->allowed_header, "*");
+    cos_str_set(&rule_content->expose_header, "xxx");
+    rule_content->max_age_seconds = 3600;
+    cos_list_add_tail(&rule_content->node, &rule_list);
+
+    rule_content = cos_create_cors_rule_content(p);
+    cos_str_set(&rule_content->id, "testrule2");
+    cos_str_set(&rule_content->allowed_origin, "http://www.qq2.com");
+    cos_str_set(&rule_content->allowed_method, "GET");
+    cos_str_set(&rule_content->allowed_header, "*");
+    cos_str_set(&rule_content->expose_header, "yyy");
+    rule_content->max_age_seconds = 7200;
+    cos_list_add_tail(&rule_content->node, &rule_list);
+
+    rule_content = cos_create_cors_rule_content(p);
+    cos_str_set(&rule_content->id, "testrule3");
+    cos_str_set(&rule_content->allowed_origin, "http://www.qq3.com");
+    cos_str_set(&rule_content->allowed_method, "GET");
+    cos_str_set(&rule_content->allowed_header, "*");
+    cos_str_set(&rule_content->expose_header, "zzz");
+    rule_content->max_age_seconds = 60;
+    cos_list_add_tail(&rule_content->node, &rule_list);
+
+    // put cors
+    s = cos_put_bucket_cors(options, &bucket, &rule_list, &resp_headers);
+    log_status(s);
+
+    // get cors
+    cos_list_t rule_list_ret;
+    cos_list_init(&rule_list_ret);
+    s = cos_get_bucket_cors(options, &bucket, &rule_list_ret, &resp_headers);
+    log_status(s);
+    cos_cors_rule_content_t *content = NULL;
+    cos_list_for_each_entry(cos_cors_rule_content_t, content, &rule_list_ret, node) {
+        printf(
+            "cors id:%s, allowed_origin:%s, allowed_method:%s, allowed_header:%s, expose_header:%s, max_age_seconds:%d\n",
+            content->id.data, content->allowed_origin.data, content->allowed_method.data, content->allowed_header.data,
+            content->expose_header.data, content->max_age_seconds);
+    }
+
+    // delete cors
+    s = cos_delete_bucket_cors(options, &bucket, &resp_headers);
+    log_status(s);
+
+    cos_pool_destroy(p);
+}
+
+void test_versioning() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    cos_versioning_content_t *versioning = NULL;
+    versioning = cos_create_versioning_content(p);
+    cos_str_set(&versioning->status, "Suspended");
+
+    // put bucket versioning
+    s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers);
+    log_status(s);
+
+    // get bucket versioning
+    cos_str_set(&versioning->status, "");
+    s = cos_get_bucket_versioning(options, &bucket, versioning, &resp_headers);
+    log_status(s);
+    printf("bucket versioning status: %s\n", versioning->status.data);
+
+    cos_pool_destroy(p);
+}
+
+void test_replication() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_request_options_t *dst_options = NULL;
+    cos_string_t bucket;
+    cos_string_t dst_bucket;
+    cos_table_t *resp_headers = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&dst_bucket, "replicationtest");
+
+    dst_options = cos_request_options_create(p);
+    init_test_request_options(dst_options, is_cname);
+    cos_str_set(&dst_options->config->endpoint, "cn-east.myqcloud.com");
+
+    // enable bucket versioning
+    cos_versioning_content_t *versioning = NULL;
+    versioning = cos_create_versioning_content(p);
+    cos_str_set(&versioning->status, "Enabled");
+    s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers);
+    log_status(s);
+    s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers);
+    log_status(s);
+
+    cos_replication_params_t *replication_param = NULL;
+    replication_param = cos_create_replication_params(p);
+    cos_str_set(&replication_param->role, "qcs::cam::uin/100000616666:uin/100000616666");
+
+    cos_replication_rule_content_t *rule = NULL;
+    rule = cos_create_replication_rule_content(p);
+    cos_str_set(&rule->id, "Rule_01");
+    cos_str_set(&rule->status, "Enabled");
+    cos_str_set(&rule->prefix, "test1");
+    cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest");
+    cos_list_add_tail(&rule->node, &replication_param->rule_list);
+
+    rule = cos_create_replication_rule_content(p);
+    cos_str_set(&rule->id, "Rule_02");
+    cos_str_set(&rule->status, "Disabled");
+    cos_str_set(&rule->prefix, "test2");
+    cos_str_set(&rule->storage_class, "Standard_IA");
+    cos_str_set(&rule->dst_bucket,
"qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_03"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test3"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + // put bucket replication + s = cos_put_bucket_replication(options, &bucket, replication_param, &resp_headers); + log_status(s); + + // get bucket replication + cos_replication_params_t *replication_param2 = NULL; + replication_param2 = cos_create_replication_params(p); + s = cos_get_bucket_replication(options, &bucket, replication_param2, &resp_headers); + log_status(s); + printf("ReplicationConfiguration role: %s\n", replication_param2->role.data); + cos_replication_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_replication_rule_content_t, content, &replication_param2->rule_list, node) { + printf("ReplicationConfiguration rule, id:%s, status:%s, prefix:%s, dst_bucket:%s, storage_class:%s\n", + content->id.data, content->status.data, content->prefix.data, content->dst_bucket.data, + content->storage_class.data); + } + + // delete bucket replication + s = cos_delete_bucket_replication(options, &bucket, &resp_headers); + log_status(s); + + // disable bucket versioning + cos_str_set(&versioning->status, "Suspended"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_presigned_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t presigned_url; + cos_table_t *params = NULL; + cos_table_t *headers = NULL; + int sign_host = 1; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_gen_presigned_url(options, &bucket, &object, 300, HTTP_GET, &presigned_url); + printf("presigned_url: %s\n", presigned_url.data); + + // 添加您自己的params和headers + params = cos_table_make(options->pool, 0); + // cos_table_add(params, "param1", "value"); + headers = cos_table_make(options->pool, 0); + // cos_table_add(headers, "header1", "value"); + + // 强烈建议sign_host为1,这样强制把host头域加入签名列表,防止越权访问问题 + cos_gen_presigned_url_safe(options, &bucket, &object, 300, HTTP_GET, headers, params, sign_host, &presigned_url); + printf("presigned_url_safe: %s\n", presigned_url.data); + + cos_pool_destroy(p); +} + +void test_head_bucket() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + options->ctl = cos_http_controller_create(options->pool, 0); + + status = cos_head_bucket(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_check_bucket_exist() { + cos_pool_t *pool 
= NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_bucket_exist_status_e bucket_exist; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 检查桶是否存在 + status = cos_check_bucket_exist(options, &bucket, &bucket_exist, &resp_headers); + if (bucket_exist == COS_BUCKET_NON_EXIST) { + printf("bucket: %.*s non exist.\n", bucket.len, bucket.data); + } else if (bucket_exist == COS_BUCKET_EXIST) { + printf("bucket: %.*s exist.\n", bucket.len, bucket.data); + } else { + printf("bucket: %.*s unknown status.\n", bucket.len, bucket.data); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_get_service() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_get_service_params_t *list_params = NULL; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + + //创建get service参数, 默认获取全部bucket + list_params = cos_create_get_service_params(options->pool); + //若将all_region设置为0,则只根据options->config->endpoint的区域进行查询 + // list_params->all_region = 0; + + status = cos_get_service(options, list_params, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_get_service_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_get_service_content_t, content, &list_params->bucket_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\n", content->bucket_name.len, content->bucket_name.data, + content->location.len, content->location.data, content->creation_date.len, + content->creation_date.data); + printf("%s", line); + } + + cos_pool_destroy(pool); +} + +void test_website() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_website_params_t *website_params = NULL; + cos_website_params_t *website_result = NULL; + cos_website_rule_content_t *website_content = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建website参数 + website_params = cos_create_website_params(options->pool); + cos_str_set(&website_params->index, "index.html"); + cos_str_set(&website_params->redirect_protocol, "https"); + cos_str_set(&website_params->error_document, "Error.html"); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_errcode, "404"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "404.html"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "docs/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key_prefix, 
"documents/"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "img/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "demo.jpg"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + status = cos_put_bucket_website(options, &bucket, website_params, &resp_headers); + log_status(status); + + website_result = cos_create_website_params(options->pool); + status = cos_get_bucket_website(options, &bucket, website_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_website_rule_content_t *content = NULL; + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", website_result->index.len, website_result->index.data); + printf("index: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->redirect_protocol.len, + website_result->redirect_protocol.data); + printf("redirect protocol: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->error_document.len, website_result->error_document.data); + printf("error document: %s", line); + cos_list_for_each_entry(cos_website_rule_content_t, content, &website_result->rule_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\t%.*s\t%.*s\n", content->condition_errcode.len, + content->condition_errcode.data, content->condition_prefix.len, content->condition_prefix.data, + content->redirect_protocol.len, content->redirect_protocol.data, + content->redirect_replace_key.len, content->redirect_replace_key.data, + content->redirect_replace_key_prefix.len, content->redirect_replace_key_prefix.data); + printf("%s", line); + } + + status = cos_delete_bucket_website(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_domain() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_domain_params_t *domain_params = NULL; + cos_domain_params_t *domain_result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建domain参数 + domain_params = cos_create_domain_params(options->pool); + cos_str_set(&domain_params->status, "ENABLED"); + cos_str_set(&domain_params->name, "www.abc.com"); + cos_str_set(&domain_params->type, "REST"); + cos_str_set(&domain_params->forced_replacement, "CNAME"); + + status = cos_put_bucket_domain(options, &bucket, domain_params, &resp_headers); + log_status(status); + + domain_result = cos_create_domain_params(options->pool); + status = cos_get_bucket_domain(options, &bucket, domain_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", domain_result->status.len, domain_result->status.data); + printf("status: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->name.len, domain_result->name.data); + printf("name: %s", line); + line = apr_psprintf(options->pool, 
"%.*s\n", domain_result->type.len, domain_result->type.data); + printf("type: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->forced_replacement.len, + domain_result->forced_replacement.data); + printf("forced_replacement: %s", line); + + cos_pool_destroy(pool); +} + +void test_logging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_logging_params_t *params = NULL; + cos_logging_params_t *result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建logging参数 + params = cos_create_logging_params(options->pool); + cos_str_set(¶ms->target_bucket, TEST_BUCKET_NAME); + cos_str_set(¶ms->target_prefix, "logging/"); + + status = cos_put_bucket_logging(options, &bucket, params, &resp_headers); + log_status(status); + + result = cos_create_logging_params(options->pool); + status = cos_get_bucket_logging(options, &bucket, result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", result->target_bucket.len, result->target_bucket.data); + printf("target bucket: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", result->target_prefix.len, result->target_prefix.data); + printf("target prefix: %s", line); + + cos_pool_destroy(pool); +} + +void test_inventory() { + cos_pool_t *pool = NULL; + int is_cname = 0; + int inum = 3, i, len; + char buf[inum][32]; + char dest_bucket[128]; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_inventory_params_t *get_params = NULL; + cos_inventory_optional_t *optional = NULL; + cos_list_inventory_params_t *list_params = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put bucket inventory + len = snprintf(dest_bucket, 128, "qcs::cos:%s::%s", TEST_REGION, TEST_BUCKET_NAME); + dest_bucket[len] = 0; + for (i = 0; i < inum; i++) { + cos_inventory_params_t *params = cos_create_inventory_params(pool); + cos_inventory_optional_t *optional; + len = snprintf(buf[i], 32, "id%d", i); + buf[i][len] = 0; + cos_str_set(¶ms->id, buf[i]); + cos_str_set(¶ms->is_enabled, "true"); + cos_str_set(¶ms->frequency, "Daily"); + cos_str_set(¶ms->filter_prefix, "myPrefix"); + cos_str_set(¶ms->included_object_versions, "All"); + cos_str_set(¶ms->destination.format, "CSV"); + cos_str_set(¶ms->destination.account_id, TEST_UIN); + cos_str_set(¶ms->destination.bucket, dest_bucket); + cos_str_set(¶ms->destination.prefix, "invent"); + params->destination.encryption = 1; + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "Size"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "LastModifiedDate"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ETag"); + cos_list_add_tail(&optional->node, 
¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "StorageClass"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ReplicationStatus"); + cos_list_add_tail(&optional->node, ¶ms->fields); + + status = cos_put_bucket_inventory(options, &bucket, params, &resp_headers); + log_status(status); + } + + // get inventory + get_params = cos_create_inventory_params(pool); + cos_str_set(&get_params->id, buf[inum / 2]); + status = cos_get_bucket_inventory(options, &bucket, get_params, &resp_headers); + log_status(status); + + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + + // list inventory + list_params = cos_create_list_inventory_params(pool); + status = cos_list_bucket_inventory(options, &bucket, list_params, &resp_headers); + log_status(status); + + get_params = NULL; + cos_list_for_each_entry(cos_inventory_params_t, get_params, &list_params->inventorys, node) { + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + } + + // delete inventory + for (i = 0; i < inum; i++) { + cos_string_t id; + cos_str_set(&id, buf[i]); + status = cos_delete_bucket_inventory(options, &bucket, &id, &resp_headers); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_bucket_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, 
"xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_bucket_tagging(options, &bucket, params, &resp_headers); + log_status(status); + + // get tagging + result = cos_create_tagging_params(pool); + status = cos_get_bucket_tagging(options, &bucket, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_bucket_tagging(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_object_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t version_id = cos_string(""); + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // put object tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_object_tagging(options, &bucket, &object, &version_id, NULL, params, &resp_headers); + log_status(status); + + // get object tagging + result = cos_create_tagging_params(pool); + status = cos_get_object_tagging(options, &bucket, &object, &version_id, NULL, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_object_tagging(options, &bucket, &object, &version_id, NULL, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +static void log_get_referer(cos_referer_params_t *result) { + int index = 0; + cos_referer_domain_t *domain; + + cos_warn_log("status: %s", result->status.data); + cos_warn_log("referer_type: %s", result->referer_type.data); + cos_warn_log("empty_refer_config: %s", result->empty_refer_config.data); + + cos_list_for_each_entry(cos_referer_domain_t, domain, &result->domain_list, node) { + cos_warn_log("domain index:%d", ++index); + cos_warn_log("domain: %s", domain->domain.data); + } +} + +void test_referer() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_referer_params_t *params = NULL; + cos_referer_domain_t *domain = NULL; + cos_referer_params_t *result = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/32492 + params = 
+
+static void log_get_referer(cos_referer_params_t *result) {
+    int index = 0;
+    cos_referer_domain_t *domain;
+
+    cos_warn_log("status: %s", result->status.data);
+    cos_warn_log("referer_type: %s", result->referer_type.data);
+    cos_warn_log("empty_refer_config: %s", result->empty_refer_config.data);
+
+    cos_list_for_each_entry(cos_referer_domain_t, domain, &result->domain_list, node) {
+        cos_warn_log("domain index:%d", ++index);
+        cos_warn_log("domain: %s", domain->domain.data);
+    }
+}
+
+void test_referer() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_referer_params_t *params = NULL;
+    cos_referer_domain_t *domain = NULL;
+    cos_referer_params_t *result = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // Replace with your own configuration; see https://cloud.tencent.com/document/product/436/32492
+    params = cos_create_referer_params(pool);
+    cos_str_set(&params->status, "Enabled");
+    cos_str_set(&params->referer_type, "White-List");
+    cos_str_set(&params->empty_refer_config, "Allow");
+    domain = cos_create_referer_domain(pool);
+    cos_str_set(&domain->domain, "www.qq.com");
+    cos_list_add_tail(&domain->node, &params->domain_list);
+    domain = cos_create_referer_domain(pool);
+    cos_str_set(&domain->domain, "*.tencent.com");
+    cos_list_add_tail(&domain->node, &params->domain_list);
+
+    // put referer
+    status = cos_put_bucket_referer(options, &bucket, params, &resp_headers);
+    log_status(status);
+
+    // get referer
+    result = cos_create_referer_params(pool);
+    status = cos_get_bucket_referer(options, &bucket, result, &resp_headers);
+    log_status(status);
+    if (status->code == 200) {
+        log_get_referer(result);
+    }
+
+    cos_pool_destroy(pool);
+}
+
+void test_intelligenttiering() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_intelligenttiering_params_t *params = NULL;
+    cos_intelligenttiering_params_t *result = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // put intelligenttiering
+    params = cos_create_intelligenttiering_params(pool);
+    cos_str_set(&params->status, "Enabled");
+    params->days = 30;
+
+    status = cos_put_bucket_intelligenttiering(options, &bucket, params, &resp_headers);
+    log_status(status);
+
+    // get intelligenttiering
+    result = cos_create_intelligenttiering_params(pool);
+    status = cos_get_bucket_intelligenttiering(options, &bucket, result, &resp_headers);
+    log_status(status);
+
+    printf("status: %s\n", result->status.data);
+    printf("days: %d\n", result->days);
+    cos_pool_destroy(pool);
+}
+
+void test_delete_directory() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    int is_truncated = 1;
+    cos_string_t marker;
+    cos_list_t deleted_object_list;
+    int is_quiet = COS_TRUE;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    cos_str_set(&list_params->prefix, "folder/");
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+
+        s = cos_delete_objects(options, &bucket, &list_params->object_list, is_quiet, &resp_headers, &deleted_object_list);
+        log_status(s);
+        if (!cos_status_is_ok(s)) {
+            printf("delete objects failed, req_id:%s\n", s->req_id);
+        }
+
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+    cos_pool_destroy(p);
+}
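+
+/*
+ * Note (added): test_delete_directory() above sets is_quiet to COS_TRUE, so
+ * the batch-delete response only reports keys that failed to delete; with
+ * COS_FALSE, deleted_object_list would also contain the successfully deleted
+ * keys. The marker/next_marker loop is the same paging pattern used by the
+ * listing tests below.
+ */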
+
+void test_list_directory() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    int is_truncated = 1;
+    cos_string_t marker;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    // prefix: only keys beginning with this prefix are listed
+    cos_str_set(&list_params->prefix, "folder/");
+    // delimiter: "/" lists only the current directory level; leave it empty to list every object
+    cos_str_set(&list_params->delimiter, "/");
+    // maximum number of entries per request; one list-object call returns at most 1000
+    list_params->max_ret = 1000;
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+        // list_params->object_list holds the objects returned for this page
+        cos_list_object_content_t *content = NULL;
+        cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+            printf("object: %s\n", content->key.data);
+        }
+        // list_params->common_prefix_list holds the paths cut off at the delimiter;
+        // with "/" as the delimiter these are the subdirectory prefixes
+        cos_list_object_common_prefix_t *common_prefix = NULL;
+        cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) {
+            printf("common prefix: %s\n", common_prefix->prefix.data);
+        }
+
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+    cos_pool_destroy(p);
+}
+
+void test_list_all_objects() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    int is_truncated = 1;
+    cos_string_t marker;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    // maximum number of entries per request; one list-object call returns at most 1000
+    list_params->max_ret = 1000;
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        cos_list_init(&list_params->object_list);
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+        // list_params->object_list holds the objects returned for this page
+        cos_list_object_content_t *content = NULL;
+        cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+            printf("object: %s\n", content->key.data);
+        }
+
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+    cos_pool_destroy(p);
+}
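+
+/*
+ * Note (added): test_list_all_objects() above re-initializes
+ * list_params->object_list with cos_list_init() on every iteration before
+ * calling cos_list_object() again; this is worth keeping in mind when
+ * adapting the other paging loops in this file, which reuse the same params
+ * object across pages without resetting the result list.
+ */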
+
+void test_download_directory() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t file_name;
+    cos_string_t suffix = cos_string("/");
+    cos_table_t *resp_headers;
+    cos_table_t *headers = NULL;
+    cos_table_t *params = NULL;
+    int is_truncated = 1;
+    cos_string_t marker;
+    apr_status_t status;
+
+    // initialize request options
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    cos_str_set(&list_params->prefix, "folder/");  // replace with your own directory name
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        log_status(s);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+        cos_list_object_content_t *content = NULL;
+        cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+            cos_str_set(&file_name, content->key.data);
+            if (cos_ends_with(&content->key, &suffix)) {
+                // Directories have to be created locally first; adjust the 0x0755 permissions as needed
+                // (see the definitions in apr_file_info.h).
+                status = apr_dir_make(content->key.data, 0x0755, options->pool);
+                if (status != APR_SUCCESS && !APR_STATUS_IS_EEXIST(status)) {
+                    printf("mkdir: %s failed, status: %d\n", content->key.data, status);
+                }
+            } else {
+                // Download the object into the local directory tree, relative to the current working directory.
+                s = cos_get_object_to_file(options, &bucket, &content->key, headers, params, &file_name, &resp_headers);
+                if (!cos_status_is_ok(s)) {
+                    printf("get object[%s] failed, req_id:%s\n", content->key.data, s->req_id);
+                }
+            }
+        }
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_move() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t src_object;
+    cos_string_t src_endpoint;
+    cos_table_t *resp_headers = NULL;
+
+    // create a memory pool
+    cos_pool_create(&p, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // set up the copy (a move is a copy followed by deleting the source)
+    cos_str_set(&object, TEST_OBJECT_NAME1);
+    cos_str_set(&src_endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&src_object, TEST_OBJECT_NAME2);
+
+    cos_copy_object_params_t *params = NULL;
+    params = cos_create_copy_object_params(p);
+    s = cos_copy_object(options, &bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers);
+    log_status(s);
+    if (cos_status_is_ok(s)) {
+        s = cos_delete_object(options, &bucket, &src_object, &resp_headers);
+        log_status(s);
+        printf("move object succeeded\n");
+    } else {
+        printf("move object failed\n");
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+// basic image processing
+void test_ci_base_image_process() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t file;
+    cos_table_t *resp_headers;
+    cos_table_t *params = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    params = cos_table_make(p, 1);
+    apr_table_addn(params, "imageMogr2/thumbnail/!50p", "");
+    cos_str_set(&file, "test.jpg");
+    cos_str_set(&object, "test.jpg");
+    s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+    }
+    cos_pool_destroy(p);
+}
+
+// persistent image processing
+void test_ci_image_process() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t
object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + // 云上数据处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + s = ci_image_process(options, &bucket, &object, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + // 上传时处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + cos_pool_destroy(p); +} + +// 二维码识别 +void test_ci_image_qrcode() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + ci_qrcode_info_t *content = NULL; + ci_qrcode_result_t *result2 = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"QRcode/cover/1\"}]}"); + // 上传时识别 + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("put object failed\n"); + } + printf("CodeStatus: %d\n", results->object.code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &results->object.qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + + // 下载时识别 + s = ci_get_qrcode(options, &bucket, &object, 1, NULL, NULL, &resp_headers, &result2); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("get object failed\n"); + } + printf("CodeStatus: %d\n", result2->code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &result2->qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + printf("ImageResult: %s\n", result2->result_image.data); + + //销毁内存池 + cos_pool_destroy(p); +} + +// 图片压缩 +void test_ci_image_compression() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t 
object;
+    cos_string_t file;
+    cos_table_t *resp_headers;
+    cos_table_t *params = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    params = cos_table_make(p, 1);
+    apr_table_addn(params, "imageMogr2/format/tpg", "");
+    cos_str_set(&object, "test.jpg");
+    cos_str_set(&file, "test.tpg");
+    s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+    }
+
+    params = cos_table_make(p, 1);
+    apr_table_addn(params, "imageMogr2/format/heif", "");
+    cos_str_set(&file, "test.heif");
+    s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+    }
+
+    cos_pool_destroy(p);
+}
+
+static void log_video_auditing_result(ci_video_auditing_job_result_t *result) {
+    cos_warn_log("jobid: %s", result->jobs_detail.job_id.data);
+    cos_warn_log("state: %s", result->jobs_detail.state.data);
+    cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data);
+}
+
+static void log_get_auditing_result(ci_auditing_job_result_t *result) {
+    int index = 0;
+    ci_auditing_snapshot_result_t *snapshot_info;
+    ci_auditing_audio_section_result_t *audio_section_info;
+
+    cos_warn_log("nonexist_job_ids: %s", result->nonexist_job_ids.data);
+    cos_warn_log("code: %s", result->jobs_detail.code.data);
+    cos_warn_log("message: %s", result->jobs_detail.message.data);
+    cos_warn_log("state: %s", result->jobs_detail.state.data);
+    cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data);
+    cos_warn_log("object: %s", result->jobs_detail.object.data);
+    cos_warn_log("snapshot_count: %s", result->jobs_detail.snapshot_count.data);
+    cos_warn_log("result: %d", result->jobs_detail.result);
+
+    cos_warn_log("porn_info.hit_flag: %d", result->jobs_detail.porn_info.hit_flag);
+    cos_warn_log("porn_info.count: %d", result->jobs_detail.porn_info.count);
+    cos_warn_log("terrorism_info.hit_flag: %d", result->jobs_detail.terrorism_info.hit_flag);
+    cos_warn_log("terrorism_info.count: %d", result->jobs_detail.terrorism_info.count);
+    cos_warn_log("politics_info.hit_flag: %d", result->jobs_detail.politics_info.hit_flag);
+    cos_warn_log("politics_info.count: %d", result->jobs_detail.politics_info.count);
+    cos_warn_log("ads_info.hit_flag: %d", result->jobs_detail.ads_info.hit_flag);
+    cos_warn_log("ads_info.count: %d", result->jobs_detail.ads_info.count);
+
+    cos_list_for_each_entry(ci_auditing_snapshot_result_t, snapshot_info, &result->jobs_detail.snapshot_info_list, node) {
+        cos_warn_log("snapshot index:%d", ++index);
+        cos_warn_log("snapshot_info->url: %s", snapshot_info->url.data);
+        cos_warn_log("snapshot_info->snapshot_time: %d", snapshot_info->snapshot_time);
+        cos_warn_log("snapshot_info->text: %s", snapshot_info->text.data);
+
+        cos_warn_log("snapshot_info->porn_info.hit_flag: %d", snapshot_info->porn_info.hit_flag);
+        cos_warn_log("snapshot_info->porn_info.score: %d",
snapshot_info->porn_info.score); + cos_warn_log("snapshot_info->porn_info.label: %s", snapshot_info->porn_info.label.data); + cos_warn_log("snapshot_info->porn_info.sub_lable: %s", snapshot_info->porn_info.sub_lable.data); + cos_warn_log("snapshot_info->terrorism_info.hit_flag: %d", snapshot_info->terrorism_info.hit_flag); + cos_warn_log("snapshot_info->terrorism_info.score: %d", snapshot_info->terrorism_info.score); + cos_warn_log("snapshot_info->terrorism_info.label: %s", snapshot_info->terrorism_info.label.data); + cos_warn_log("snapshot_info->terrorism_info.sub_lable: %s", snapshot_info->terrorism_info.sub_lable.data); + cos_warn_log("snapshot_info->politics_info.hit_flag: %d", snapshot_info->politics_info.hit_flag); + cos_warn_log("snapshot_info->politics_info.score: %d", snapshot_info->politics_info.score); + cos_warn_log("snapshot_info->politics_info.label: %s", snapshot_info->politics_info.label.data); + cos_warn_log("snapshot_info->politics_info.sub_lable: %s", snapshot_info->politics_info.sub_lable.data); + cos_warn_log("snapshot_info->ads_info.hit_flag: %d", snapshot_info->ads_info.hit_flag); + cos_warn_log("snapshot_info->ads_info.score: %d", snapshot_info->ads_info.score); + cos_warn_log("snapshot_info->ads_info.label: %s", snapshot_info->ads_info.label.data); + cos_warn_log("snapshot_info->ads_info.sub_lable: %s", snapshot_info->ads_info.sub_lable.data); + } + + index = 0; + cos_list_for_each_entry(ci_auditing_audio_section_result_t, audio_section_info, + &result->jobs_detail.audio_section_info_list, node) { + cos_warn_log("audio_section index:%d", ++index); + cos_warn_log("audio_section_info->url: %s", audio_section_info->url.data); + cos_warn_log("audio_section_info->text: %s", audio_section_info->text.data); + cos_warn_log("audio_section_info->offset_time: %d", audio_section_info->offset_time); + cos_warn_log("audio_section_info->duration: %d", audio_section_info->duration); + + cos_warn_log("audio_section_info->porn_info.hit_flag: %d", audio_section_info->porn_info.hit_flag); + cos_warn_log("audio_section_info->porn_info.score: %d", audio_section_info->porn_info.score); + cos_warn_log("audio_section_info->porn_info.key_words: %s", audio_section_info->porn_info.key_words.data); + cos_warn_log("audio_section_info->terrorism_info.hit_flag: %d", audio_section_info->terrorism_info.hit_flag); + cos_warn_log("audio_section_info->terrorism_info.score: %d", audio_section_info->terrorism_info.score); + cos_warn_log("audio_section_info->terrorism_info.key_words: %s", audio_section_info->terrorism_info.key_words.data); + cos_warn_log("audio_section_info->politics_info.hit_flag: %d", audio_section_info->politics_info.hit_flag); + cos_warn_log("audio_section_info->politics_info.score: %d", audio_section_info->politics_info.score); + cos_warn_log("audio_section_info->politics_info.key_words: %s", audio_section_info->politics_info.key_words.data); + cos_warn_log("audio_section_info->ads_info.hit_flag: %d", audio_section_info->ads_info.hit_flag); + cos_warn_log("audio_section_info->ads_info.score: %d", audio_section_info->ads_info.score); + cos_warn_log("audio_section_info->ads_info.key_words: %s", audio_section_info->ads_info.key_words.data); + } +} + +static void log_media_buckets_result(ci_media_buckets_result_t *result) { + int index = 0; + ci_media_bucket_list_t *media_bucket; + + cos_warn_log("total_count: %d", result->total_count); + cos_warn_log("page_number: %d", result->page_number); + cos_warn_log("page_size: %d", result->page_size); + + 
cos_list_for_each_entry(ci_media_bucket_list_t, media_bucket, &result->media_bucket_list, node) { + cos_warn_log("media_bucket index:%d", ++index); + cos_warn_log("media_bucket->bucket_id: %s", media_bucket->bucket_id.data); + cos_warn_log("media_bucket->name: %s", media_bucket->name.data); + cos_warn_log("media_bucket->region: %s", media_bucket->region.data); + cos_warn_log("media_bucket->create_time: %s", media_bucket->create_time.data); + } +} + +static void log_media_info_result(ci_media_info_result_t *result) { + // format + cos_warn_log("format.num_stream: %d", result->format.num_stream); + cos_warn_log("format.num_program: %d", result->format.num_program); + cos_warn_log("format.format_name: %s", result->format.format_name.data); + cos_warn_log("format.format_long_name: %s", result->format.format_long_name.data); + cos_warn_log("format.start_time: %f", result->format.start_time); + cos_warn_log("format.duration: %f", result->format.duration); + cos_warn_log("format.bit_rate: %d", result->format.bit_rate); + cos_warn_log("format.size: %d", result->format.size); + + // stream.video + cos_warn_log("stream.video.index: %d", result->stream.video.index); + cos_warn_log("stream.video.codec_name: %s", result->stream.video.codec_name.data); + cos_warn_log("stream.video.codec_long_name: %s", result->stream.video.codec_long_name.data); + cos_warn_log("stream.video.codec_time_base: %s", result->stream.video.codec_time_base.data); + cos_warn_log("stream.video.codec_tag_string: %s", result->stream.video.codec_tag_string.data); + cos_warn_log("stream.video.codec_tag: %s", result->stream.video.codec_tag.data); + cos_warn_log("stream.video.profile: %s", result->stream.video.profile.data); + cos_warn_log("stream.video.height: %d", result->stream.video.height); + cos_warn_log("stream.video.width: %d", result->stream.video.width); + cos_warn_log("stream.video.has_b_frame: %d", result->stream.video.has_b_frame); + cos_warn_log("stream.video.ref_frames: %d", result->stream.video.ref_frames); + cos_warn_log("stream.video.sar: %s", result->stream.video.sar.data); + cos_warn_log("stream.video.dar: %s", result->stream.video.dar.data); + cos_warn_log("stream.video.pix_format: %s", result->stream.video.pix_format.data); + cos_warn_log("stream.video.field_order: %s", result->stream.video.field_order.data); + cos_warn_log("stream.video.level: %d", result->stream.video.level); + cos_warn_log("stream.video.fps: %d", result->stream.video.fps); + cos_warn_log("stream.video.avg_fps: %s", result->stream.video.avg_fps.data); + cos_warn_log("stream.video.timebase: %s", result->stream.video.timebase.data); + cos_warn_log("stream.video.start_time: %f", result->stream.video.start_time); + cos_warn_log("stream.video.duration: %f", result->stream.video.duration); + cos_warn_log("stream.video.bit_rate: %f", result->stream.video.bit_rate); + cos_warn_log("stream.video.num_frames: %d", result->stream.video.num_frames); + cos_warn_log("stream.video.language: %s", result->stream.video.language.data); + + // stream.audio + cos_warn_log("stream.audio.index: %d", result->stream.audio.index); + cos_warn_log("stream.audio.codec_name: %s", result->stream.audio.codec_name.data); + cos_warn_log("stream.audio.codec_long_name: %s", result->stream.audio.codec_long_name.data); + cos_warn_log("stream.audio.codec_time_base: %s", result->stream.audio.codec_time_base.data); + cos_warn_log("stream.audio.codec_tag_string: %s", result->stream.audio.codec_tag_string.data); + cos_warn_log("stream.audio.codec_tag: %s", 
result->stream.audio.codec_tag.data);
+    cos_warn_log("stream.audio.sample_fmt: %s", result->stream.audio.sample_fmt.data);
+    cos_warn_log("stream.audio.sample_rate: %d", result->stream.audio.sample_rate);
+    cos_warn_log("stream.audio.channel: %d", result->stream.audio.channel);
+    cos_warn_log("stream.audio.channel_layout: %s", result->stream.audio.channel_layout.data);
+    cos_warn_log("stream.audio.timebase: %s", result->stream.audio.timebase.data);
+    cos_warn_log("stream.audio.start_time: %f", result->stream.audio.start_time);
+    cos_warn_log("stream.audio.duration: %f", result->stream.audio.duration);
+    cos_warn_log("stream.audio.bit_rate: %f", result->stream.audio.bit_rate);
+    cos_warn_log("stream.audio.language: %s", result->stream.audio.language.data);
+
+    // stream.subtitle
+    cos_warn_log("stream.subtitle.index: %d", result->stream.subtitle.index);
+    cos_warn_log("stream.subtitle.language: %s", result->stream.subtitle.language.data);
+}
+
+void test_ci_video_auditing() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    ci_video_auditing_job_options_t *job_options;
+    ci_video_auditing_job_result_t *job_result;
+    ci_auditing_job_result_t *auditing_result;
+
+    // Basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT);  // https://ci..myqcloud.com
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // Replace with your own settings; see https://cloud.tencent.com/document/product/436/47316
+    job_options = ci_video_auditing_job_options_create(p);
+    cos_str_set(&job_options->input_object, "test.mp4");
+    cos_str_set(&job_options->job_conf.detect_type, "Porn,Terrorism,Politics,Ads");
+    cos_str_set(&job_options->job_conf.callback_version, "Detail");
+    job_options->job_conf.detect_content = 1;
+    cos_str_set(&job_options->job_conf.snapshot.mode, "Interval");
+    job_options->job_conf.snapshot.time_interval = 1.5;
+    job_options->job_conf.snapshot.count = 10;
+
+    // Submit a video auditing job
+    s = ci_create_video_auditing_job(options, &bucket, job_options, NULL, &resp_headers, &job_result);
+    log_status(s);
+    if (s->code == 200) {
+        log_video_auditing_result(job_result);
+    }
+
+    // Wait for the video auditing job to finish; adjust the wait time as needed
+    sleep(300);
+
+    // Fetch the auditing job result
+    s = ci_get_auditing_job(options, &bucket, &job_result->jobs_detail.job_id, NULL, &resp_headers, &auditing_result);
+    log_status(s);
+    if (s->code == 200) {
+        log_get_auditing_result(auditing_result);
+    }
+
+    // Destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_ci_media_process_media_bucket() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers;
+    ci_media_buckets_request_t *media_buckets_request;
+    ci_media_buckets_result_t *media_buckets_result;
+
+    // Basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT);  // https://ci..myqcloud.com
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+
+    // Replace with your own settings; see https://cloud.tencent.com/document/product/436/48988
+    media_buckets_request = ci_media_buckets_request_create(p);
+    cos_str_set(&media_buckets_request->regions, "");
+    cos_str_set(&media_buckets_request->bucket_names, "");
+    cos_str_set(&media_buckets_request->bucket_name, "");
+    cos_str_set(&media_buckets_request->page_number, "1");
+    cos_str_set(&media_buckets_request->page_size, "10");
+    s = ci_describe_media_buckets(options, media_buckets_request, NULL, &resp_headers, &media_buckets_result);
+    log_status(s);
+    if (s->code == 200) {
+        log_media_buckets_result(media_buckets_result);
+    }
+
+    // Destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_ci_media_process_snapshot() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    cos_list_t download_buffer;
+    cos_string_t object;
+    ci_get_snapshot_request_t *snapshot_request;
+    cos_buf_t *content = NULL;
+    cos_string_t pic_file = cos_string("snapshot.jpg");
+
+    // Basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test.mp4");
+
+    // Replace with your own settings; see https://cloud.tencent.com/document/product/436/55671
+    snapshot_request = ci_snapshot_request_create(p);
+    snapshot_request->time = 7.5;
+    snapshot_request->width = 0;
+    snapshot_request->height = 0;
+    cos_str_set(&snapshot_request->format, "jpg");
+    cos_str_set(&snapshot_request->rotate, "auto");
+    cos_str_set(&snapshot_request->mode, "exactframe");
+    cos_list_init(&download_buffer);
+
+    s = ci_get_snapshot_to_buffer(options, &bucket, &object, snapshot_request, NULL, &download_buffer, &resp_headers);
+    log_status(s);
+
+    int64_t len = 0;
+    int64_t size = 0;
+    int64_t pos = 0;
+    cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); }
+    char *buf = cos_pcalloc(p, (apr_size_t)(len + 1));
+    buf[len] = '\0';
+    cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) {
+        size = cos_buf_size(content);
+        memcpy(buf + pos, content->pos, (size_t)size);
+        pos += size;
+    }
+    cos_warn_log("Download len:%ld data=%s", len, buf);
+
+    s = ci_get_snapshot_to_file(options, &bucket, &object, snapshot_request, NULL, &pic_file, &resp_headers);
+    log_status(s);
+
+    // Destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_ci_media_process_media_info() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    ci_media_info_result_t *media_info;
+    cos_string_t object;
+
+    // Basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test.mp4");
+
+    // Replace with your own settings; see https://cloud.tencent.com/document/product/436/55672
+    s = ci_get_media_info(options, &bucket, &object, NULL, &resp_headers, &media_info);
+    log_status(s);
+    if (s->code == 200) {
+        log_media_info_result(media_info);
+    }
+
+    // Destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+int main(int argc, char *argv[]) {
+    // Obtain SECRETID and SECRETKEY from environment variables
+    // TEST_ACCESS_KEY_ID = getenv("COS_SECRETID");
+    // TEST_ACCESS_KEY_SECRET = getenv("COS_SECRETKEY");
+
+    if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
+        exit(1);
+    }
+
+    // set log level, default COS_LOG_WARN
+    cos_log_set_level(COS_LOG_WARN);
+
+    // set log output, default stderr
+    cos_log_set_output(NULL);
+
+    // test_intelligenttiering();
+    // test_bucket_tagging();
+    // test_object_tagging();
+    // test_referer();
+    // test_logging();
+    // test_inventory();
+    // test_put_object_from_file_with_sse();
+    // test_get_object_to_file_with_sse();
+    // test_head_bucket();
+    // test_check_bucket_exist();
+    // test_get_service();
+    // test_website();
+    // test_domain();
+    // test_delete_objects();
+    // test_delete_objects_by_prefix();
+    // test_bucket();
+    // test_bucket_lifecycle();
+    // test_object_restore();
+    test_put_object_from_file();
+    // test_sign();
+    // test_object();
+    // test_put_object_with_limit();
+    // test_get_object_with_limit();
+    // test_head_object();
+    // test_gen_object_url();
+    // test_list_objects();
+    // test_list_directory();
+    // test_list_all_objects();
+    // test_create_dir();
+    // test_append_object();
+    // test_check_object_exist();
+    // multipart_upload_file_from_file();
+    // multipart_upload_file_from_buffer();
+    // abort_multipart_upload();
+    // list_multipart();
+
+    // pthread_t tid[20];
+    // test_resumable_upload_with_multi_threads();
+    // test_resumable();
+    // test_bucket();
+    // test_acl();
+    // test_copy();
+    // test_modify_storage_class();
+    // test_cors();
+    // test_versioning();
+    // test_replication();
+    // test_part_copy();
+    // test_copy_with_part_copy();
+    // test_move();
+    // test_delete_directory();
+    // test_download_directory();
+    // test_presigned_url();
+    // test_ci_base_image_process();
+    // test_ci_image_process();
+    // test_ci_image_qrcode();
+    // test_ci_image_compression();
+    // test_ci_video_auditing();
+    // test_ci_media_process_media_bucket();
+    // test_ci_media_process_snapshot();
+    // test_ci_media_process_media_info();
+
+    // cos_http_io_deinitialize last
+    cos_http_io_deinitialize();
+
+    return 0;
+}
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 0546ed7f47..1f6d0800a5 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -14,8 +14,8 @@
  */
 #define _DEFAULT_SOURCE
-#include "os.h"
 #include "tglobal.h"
+#include "os.h"
 #include "tconfig.h"
 #include "tgrant.h"
 #include "tlog.h"
@@ -63,7 +63,7 @@ int32_t tsNumOfQnodeFetchThreads = 1;
 int32_t tsNumOfSnodeStreamThreads = 4;
 int32_t tsNumOfSnodeWriteThreads = 1;
 int32_t tsMaxStreamBackendCache = 128;  // M
-int32_t tsPQSortMemThreshold = 16;      // M
+int32_t tsPQSortMemThreshold = 16;  // M
 
 // sync raft
 int32_t tsElectInterval = 25 * 1000;
@@ -121,8 +121,8 @@ int32_t tsQueryPolicy = 1;
 int32_t tsQueryRspPolicy = 0;
 int64_t tsQueryMaxConcurrentTables = 200;  // unit is 
TSDB_TABLE_NUM_UNIT bool tsEnableQueryHb = true; -bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true -bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true +bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true +bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data. bool tsQueryPlannerTrace = false; @@ -235,6 +235,13 @@ int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000; bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration +char tsS3Endpoint[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3BucketName[TSDB_FQDN_LEN] = ""; +char tsS3AppId[TSDB_FQDN_LEN] = ""; +int8_t tsS3Enabled = false; + #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { SConfigItem *pItem = cfgGetItem(pCfg, "dataDir"); @@ -256,7 +263,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -376,7 +385,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT) != 0) return -1; + if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, + CFG_SCOPE_CLIENT) != 0) + return -1; if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddString(pCfg, "slowLogScope", "", CFG_SCOPE_CLIENT) != 0) return -1; @@ -389,7 +400,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, CFG_SCOPE_BOTH) != 0) return -1; tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 10000000); - if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1; + if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) + return -1; tsNumOfTaskQueueThreads = tsNumOfCores / 2; tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); @@ -449,7 +461,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; - if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", 
tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, + CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, CFG_SCOPE_SERVER) != 0) return -1; @@ -477,7 +491,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 4, 1024, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfVnodeFetchThreads = tsNumOfCores / 4; tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); @@ -497,7 +512,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfSnodeStreamThreads = tsNumOfCores / 4; tsNumOfSnodeStreamThreads = TRANGE(tsNumOfSnodeStreamThreads, 2, 4); - if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); @@ -505,14 +521,18 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); - if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH) != 0) + if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, + CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; - if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, CFG_SCOPE_SERVER) != 0) return -1; @@ -542,7 +562,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 
1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER) != 0) + if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, + CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, CFG_SCOPE_SERVER) != 0) return -1; @@ -553,13 +574,16 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "checkpointInterval", tsCheckpointInterval, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; + GRANT_CFG_ADD; return 0; } @@ -908,7 +932,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; - tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; @@ -948,6 +972,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + GRANT_CFG_GET; return 0; } @@ -1020,7 +1046,7 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosSetCoreDump(enableCore); } else if (strcasecmp("enableQueryHb", name) == 0) { tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval; - } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { + } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; } break; @@ -1249,9 +1275,9 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; // } else if (strcasecmp("smlBatchSize", name) == 0) { // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; - } else if(strcasecmp("smlTsDefaultName", name) == 0) { + } else if (strcasecmp("smlTsDefaultName", name) == 0) { tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN); - } else if(strcasecmp("smlDot2Underline", name) == 0) { + } else if (strcasecmp("smlDot2Underline", name) == 0) { tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval; } else if (strcasecmp("shellActivityTimer", name) == 0) { tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32; @@ -1272,6 +1298,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? 
defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); + } else if (strcasecmp("s3BucketName", name) == 0) { + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 194ffa16f6..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -8,6 +8,7 @@ set( "src/vnd/vnodeCommit.c" "src/vnd/vnodeQuery.c" "src/vnd/vnodeModule.c" + "src/vnd/vnodeCos.c" "src/vnd/vnodeSvr.c" "src/vnd/vnodeSync.c" "src/vnd/vnodeSnapshot.c" @@ -134,6 +135,11 @@ else() endif() endif() +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + target_link_libraries( vnode PUBLIC os @@ -153,6 +159,13 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index + + # s3 + cos_c_sdk + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) @@ -169,7 +182,20 @@ if(${BUILD_WITH_ROCKSDB}) add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) - +# s3 +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +include_directories (${APR_INCLUDE_DIR}) +target_include_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) if(${BUILD_TEST}) add_subdirectory(test) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h new file mode 100644 index 0000000000..b8510213d7 --- /dev/null +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_VND_COS_H_ +#define _TD_VND_COS_H_ + +#include "vnd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int8_t tsS3Enabled; + +int32_t s3Init(); +void s3CleanUp(); +void s3PutObjectFromFile(const char *file, const char *object); +void s3DeleteObjects(const char *object_name[], int nobject); + +#ifdef __cplusplus +} +#endif + +#endif /*_TD_VND_COS_H_*/ diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index a4d5715083..ebe20c0e85 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -15,6 +15,7 @@ #include "tsdb.h" #include "tsdbFS2.h" +#include "vndCos.h" typedef struct { STsdb *tsdb; @@ -41,6 +42,28 @@ static int32_t tsdbDoRemoveFileObject(SRTNer *rtner, const STFileObj *fobj) { return TARRAY2_APPEND(rtner->fopArr, op); } +static int32_t tsdbRemoveFileObjectS3(SRTNer *rtner, const STFileObj *fobj) { + int32_t code = 0, lino = 0; + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + const char *object_name = taosDirEntryBaseName((char *)fobj->fname); + s3DeleteObjects(&object_name, 1); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + static int32_t tsdbDoCopyFile(SRTNer *rtner, const STFileObj *from, const STFile *to) { int32_t code = 0; int32_t lino = 0; @@ -76,6 +99,33 @@ _exit: return code; } +static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) { + int32_t code = 0; + int32_t lino = 0; + + char fname[TSDB_FILENAME_LEN]; + TdFilePtr fdFrom = NULL; + TdFilePtr fdTo = NULL; + + tsdbTFileName(rtner->tsdb, to, fname); + + fdFrom = taosOpenFile(from->fname, TD_FILE_READ); + if (fdFrom == NULL) code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); + + char *object_name = taosDirEntryBaseName(fname); + s3PutObjectFromFile(from->fname, object_name); + + taosCloseFile(&fdFrom); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + taosCloseFile(&fdFrom); + } + return code; +} + static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { int32_t code = 0; int32_t lino = 0; @@ -123,6 +173,53 @@ _exit: return code; } +static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { + int32_t code = 0; + int32_t lino = 0; + STFileOp op = {0}; + + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // create new + op = (STFileOp){ + .optype = TSDB_FOP_CREATE, + .fid = fobj->f->fid, + .nf = + { + .type = fobj->f->type, + .did = did[0], + .fid = fobj->f->fid, + .cid = fobj->f->cid, + .size = fobj->f->size, + .stt[0] = + { + .level = fobj->f->stt[0].level, + }, + }, + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // do copy the file + code = tsdbCopyFileS3(rtner, fobj, &op.nf); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + typedef struct { STsdb *tsdb; int32_t sync; @@ -201,8 +298,14 @@ static int32_t tsdbDoRetention2(void *arg) { for (int32_t ftype = 0; (ftype < TSDB_FTYPE_MAX) && (fobj = rtner->ctx->fset->farr[ftype], 1); ++ftype) { if (fobj == NULL) continue; - code = 
tsdbDoRemoveFileObject(rtner, fobj);
-      TSDB_CHECK_CODE(code, lino, _exit);
+      int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs);
+      if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && fobj->f->did.level == nlevel - 1) {
+        code = tsdbRemoveFileObjectS3(rtner, fobj);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      } else {
+        code = tsdbDoRemoveFileObject(rtner, fobj);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      }
     }
 
     SSttLvl *lvl;
@@ -228,8 +331,15 @@
       if (fobj == NULL) continue;
 
       if (fobj->f->did.level == did.level) continue;
-      code = tsdbDoMigrateFileObj(rtner, fobj, &did);
-      TSDB_CHECK_CODE(code, lino, _exit);
+
+      int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs);
+      if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && did.level == nlevel - 1) {
+        code = tsdbMigrateDataFileS3(rtner, fobj, &did);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      } else {
+        code = tsdbDoMigrateFileObj(rtner, fobj, &did);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      }
     }
 
     // stt
@@ -281,4 +391,4 @@ int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) {
     tsdbFreeRtnArg(arg);
   }
   return code;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
new file mode 100644
index 0000000000..1507df7074
--- /dev/null
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -0,0 +1,114 @@
+#define ALLOW_FORBID_FUNC
+
+#include "vndCos.h"
+
+#include "cos_api.h"
+#include "cos_http_io.h"
+#include "cos_log.h"
+
+extern char tsS3Endpoint[];
+extern char tsS3AcessKeyId[];
+extern char tsS3AcessKeySecret[];
+extern char tsS3BucketName[];
+extern char tsS3AppId[];
+
+int32_t s3Init() {
+  if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
+    return -1;
+  }
+
+  // set log level, default COS_LOG_WARN
+  cos_log_set_level(COS_LOG_WARN);
+
+  // set log output, default stderr
+  cos_log_set_output(NULL);
+
+  return 0;
+}
+
+void s3CleanUp() { cos_http_io_deinitialize(); }
+
+static void log_status(cos_status_t *s) {
+  cos_warn_log("status->code: %d", s->code);
+  if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code);
+  if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg);
+  if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id);
+}
+
+static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) {
+  options->config = cos_config_create(options->pool);
+
+  cos_config_t *config = options->config;
+
+  cos_str_set(&config->endpoint, tsS3Endpoint);
+  cos_str_set(&config->access_key_id, tsS3AcessKeyId);
+  cos_str_set(&config->access_key_secret, tsS3AcessKeySecret);
+  cos_str_set(&config->appid, tsS3AppId);
+
+  config->is_cname = is_cname;
+
+  options->ctl = cos_http_controller_create(options->pool, 0);
+}
+
+void s3PutObjectFromFile(const char *file_str, const char *object_str) {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket, object, file;
+  cos_table_t *resp_headers;
+  int traffic_limit = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_table_t *headers = NULL;
+  if (traffic_limit) {
+    // The traffic limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s; values outside this range return a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+  cos_str_set(&bucket, tsS3BucketName);
+  cos_str_set(&file, file_str);
+  cos_str_set(&object, object_str);
+  s = cos_put_object_from_file(options, &bucket, &object, &file, 
headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void s3DeleteObjects(const char *object_name[], int nobject) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + + for (int i = 0; i < nobject; ++i) { + cos_object_key_t *content = cos_create_cos_object_key(p); + cos_str_set(&content->key, object_name[i]); + cos_list_add_tail(&content->node, &object_list); + } + + cos_status_t *s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + cos_warn_log("delete objects succeeded\n"); + } else { + cos_warn_log("delete objects failed\n"); + } +} diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 74a8d14a86..6ccce5c9d7 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -14,6 +14,7 @@ */ #include "vnd.h" +#include "vndCos.h" typedef struct SVnodeTask SVnodeTask; struct SVnodeTask { @@ -81,6 +82,9 @@ int vnodeInit(int nthreads) { if (tqInit() < 0) { return -1; } + if (s3Init() < 0) { + return -1; + } return 0; } @@ -112,6 +116,7 @@ void vnodeCleanup() { walCleanUp(); tqCleanUp(); smaCleanUp(); + s3CleanUp(); } int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) { From ebd09ca532aecdd448dfe6693740270d55150f44 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:44:17 +0800 Subject: [PATCH 010/122] tsdb/write: use keep1 as minKey instead of keep2 --- source/dnode/vnode/src/tsdb/tsdbWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 2dbac956ed..6e89b47adc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -76,7 +76,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { int32_t code = 0; STsdbKeepCfg *pCfg = &pTsdb->keepCfg; TSKEY now = taosGetTimestamp(pCfg->precision); - TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2; + TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep1; TSKEY maxKey = tsMaxKeyByPrecision[pCfg->precision]; int32_t size = taosArrayGetSize(pMsg->aSubmitTbData); @@ -107,4 +107,4 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { _exit: return code; -} \ No newline at end of file +} From 973bb073a2ec804847531a83e0778aa9d8063328 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 27 Jul 2023 09:07:13 +0800 Subject: [PATCH 011/122] enhance: subquery can use expr primary key +/- value as primary key --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..554dc7cce8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -821,7 +821,19 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { FUNCTION_TYPE_IROWTS == pFunc->funcType) { return true; } - } + } else if (QUERY_NODE_OPERATOR == 
nodeType(pExpr)) { + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; + } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } From d3a9429c3b5ad4c69bbdec2e305b824d511d80ea Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:17:18 +0800 Subject: [PATCH 012/122] fix: add test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_pk.sim | 42 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/script/tsim/query/join_pk.sim diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 89572d1c06..1883a50e59 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -954,6 +954,7 @@ ,,n,script,./test.sh -f tsim/query/udfpy.sim ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim +,,y,script,./test.sh -f tsim/query/join_pk.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim new file mode 100644 index 0000000000..66bb20da24 --- /dev/null +++ b/tests/script/tsim/query/join_pk.sim @@ -0,0 +1,42 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1) +sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql select * from (select _wstart - 1s as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +if $rows != 3 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi + +if $data21 != 1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi + +if $data13 != 1 then + return -1 +endi +if $data23 != 1 then + return -1 +endi +sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + From fba43e17480b3423bae78c5e9ca638449bf58efb Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Thu, 3 Aug 2023 18:05:52 +0800 Subject: [PATCH 013/122] feature: optimize interval with limit --- source/libs/executor/inc/executorInt.h | 9 + source/libs/executor/src/executil.c | 3 +- source/libs/executor/src/operator.c | 1 - source/libs/executor/src/scanoperator.c | 47 ++-- source/libs/executor/src/timewindowoperator.c | 92 +++++- source/libs/planner/src/planLogicCreater.c | 1 - source/libs/planner/src/planOptimizer.c | 126 +++++++-- source/libs/planner/src/planSpliter.c | 12 + tests/parallel_test/cases.task | 1 + 
.../tsim/query/r/explain_tsorder.result | 212 +++++++------- .../system-test/2-query/interval_limit_opt.py | 266 ++++++++++++++++++ 11 files changed, 607 insertions(+), 163 deletions(-) create mode 100644 tests/system-test/2-query/interval_limit_opt.py diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 5baf0978cd..1bff9fce9e 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -25,6 +25,7 @@ extern "C" { #include "tsort.h" #include "ttszip.h" #include "tvariant.h" +#include "theap.h" #include "dataSinkMgt.h" #include "executil.h" @@ -418,6 +419,14 @@ typedef struct SIntervalAggOperatorInfo { EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] STimeWindowAggSupp twAggSup; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. + // for limit optimization + bool limited; + int64_t limit; + bool slimited; + int64_t slimit; + uint64_t curGroupId; // initialize to UINT64_MAX + uint64_t handledGroupNum; + BoundedQueue* pBQ; } SIntervalAggOperatorInfo; typedef struct SMergeAlignedIntervalAggOperatorInfo { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index e1bf4e7cb0..aa0c7945b0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2118,8 +2118,9 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* if (code != TSDB_CODE_SUCCESS) { return code; } + if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList); - if (groupSort) { + if (groupSort || pScanNode->groupOrderScan) { code = sortTableGroup(pTableListInfo); } } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 2db5ea2f1e..8ddcc8fd15 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -275,7 +275,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR SNode* pTagIndexCond, const char* pUser, const char* dbname) { int32_t type = nodeType(pPhyNode); const char* idstr = GET_TASKID(pTaskInfo); - if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) { SOperatorInfo* pOperator = NULL; if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6c8d9ed59f..b15cf56c3d 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -848,30 +848,29 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { return result; } - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { - setOperatorCompleted(pOperator); - return NULL; + while (1) { + if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + + // reset value for the next group data output + pOperator->status = OP_OPENED; + resetLimitInfoForNextGroup(&pInfo->base.limitInfo); + + int32_t num = 0; + STableKeyInfo* pList = NULL; + tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); + + pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); + pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); + pInfo->scanTimes = 0; + + result = doGroupedTableScan(pOperator); + if (result != NULL) { + return result; + } } - - // 
reset value for the next group data output - pOperator->status = OP_OPENED; - resetLimitInfoForNextGroup(&pInfo->base.limitInfo); - - int32_t num = 0; - STableKeyInfo* pList = NULL; - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); - - pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); - pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); - pInfo->scanTimes = 0; - - result = doGroupedTableScan(pOperator); - if (result != NULL) { - return result; - } - - setOperatorCompleted(pOperator); - return NULL; } } @@ -3551,4 +3550,4 @@ static void destoryTableCountScanOperator(void* param) { taosArrayDestroy(pTableCountScanInfo->stbUidList); taosMemoryFreeClear(param); -} \ No newline at end of file +} diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 0a46def23d..4f8a3acd15 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -876,7 +876,67 @@ bool needDeleteWindowBuf(STimeWindow* pWin, STimeWindowAggSupp* pTwSup) { return pTwSup->maxTs != INT64_MIN && pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark; } -static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, +static bool tsKeyCompFn(void* l, void* r, void* param) { + TSKEY* lTS = (TSKEY*)l; + TSKEY* rTS = (TSKEY*)r; + SIntervalAggOperatorInfo* pInfo = param; + return pInfo->binfo.outputTsOrder == ORDER_ASC ? *lTS < *rTS : *lTS > *rTS; +} + +static bool isCalculatedWin(SIntervalAggOperatorInfo* pInfo, const STimeWindow* win, uint64_t tableGroupId) { + char keyBuf[sizeof(TSKEY) + sizeof(uint64_t)] = {0}; + SET_RES_WINDOW_KEY(keyBuf, (char*)&win->skey, sizeof(TSKEY), tableGroupId); + return tSimpleHashGet(pInfo->aggSup.pResultRowHashTable, keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))) != NULL; +} + +/** + * @brief check if cur window should be filtered out by limit info + * @retval true if should be filtered out + * @retval false if not filtering out + * @note If no limit info, we skip filtering. + * If input/output ts order mismatch, we skip filtering too. + * eg. input ts order: desc, and output ts order: asc, limit: 10 + * IntervalOperator should output the first 10 windows, however, we can't find the first 10 windows until we scan + * every tuple in every block. + * And the boundedQueue keeps refreshing all records with smaller ts key. 
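+ * A worked example (assumed values, matching input/output order): asc order,
+ * limit 3, window skeys arriving as 10, 20, 30, 40. The bounded queue keeps
+ * the 3 smallest skeys {10, 20, 30}; skey 40 compares greater than the queue
+ * top (30), so that window is filtered out before any result row is created.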
+ */ +static bool filterWindowWithLimit(SIntervalAggOperatorInfo* pOperatorInfo, STimeWindow* win, uint64_t groupId) { + if (!pOperatorInfo->limited // if no limit info, no filter will be applied + || pOperatorInfo->binfo.inputTsOrder != + pOperatorInfo->binfo.outputTsOrder // if input/output ts order mismatch, no filter + ) { + return false; + } + if (pOperatorInfo->limit == 0) return true; + + if (pOperatorInfo->pBQ == NULL) { + pOperatorInfo->pBQ = createBoundedQueue(pOperatorInfo->limit - 1, tsKeyCompFn, taosMemoryFree, pOperatorInfo); + } + + bool shouldFilter = false; + // if BQ has been full, compare it with top of BQ + if (taosBQSize(pOperatorInfo->pBQ) == taosBQMaxSize(pOperatorInfo->pBQ) + 1) { + PriorityQueueNode* top = taosBQTop(pOperatorInfo->pBQ); + shouldFilter = tsKeyCompFn(top->data, &win->skey, pOperatorInfo); + } + if (shouldFilter) { + return true; + } else if (isCalculatedWin(pOperatorInfo, win, groupId)) { + return false; + } + + // cur win not been filtered out and not been pushed into BQ yet, push it into BQ + PriorityQueueNode node = {.data = taosMemoryMalloc(sizeof(TSKEY))}; + *((TSKEY*)node.data) = win->skey; + + if (NULL == taosBQPush(pOperatorInfo->pBQ, &node)) { + taosMemoryFree(node.data); + return true; + } + return false; +} + +static bool hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, int32_t scanFlag) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; @@ -891,8 +951,21 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols); SResultRow* pResult = NULL; + if (tableGroupId != pInfo->curGroupId) { + pInfo->handledGroupNum += 1; + if (pInfo->slimited && pInfo->handledGroupNum > pInfo->slimit) { + return true; + } else { + pInfo->curGroupId = tableGroupId; + destroyBoundedQueue(pInfo->pBQ); + pInfo->pBQ = NULL; + } + } + STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->binfo.inputTsOrder); + if (filterWindowWithLimit(pInfo, &win, tableGroupId)) return false; + int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -929,7 +1002,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul while (1) { int32_t prevEndPos = forwardRows - 1 + startPos; startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->binfo.inputTsOrder); - if (startPos < 0) { + if (startPos < 0 || filterWindowWithLimit(pInfo, &nextWin, tableGroupId)) { break; } // null data, failed to allocate more memory buffer @@ -963,6 +1036,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul if (pInfo->timeWindowInterpo) { saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols); } + return false; } void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) { @@ -1043,7 +1117,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, pInfo->binfo.inputTsOrder, scanFlag, true); - hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag); + if (hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 
scanFlag)) break; } initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder); @@ -1495,6 +1569,7 @@ void destroyIntervalOperatorInfo(void* param) { cleanupGroupResInfo(&pInfo->groupResInfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); + destroyBoundedQueue(pInfo->pBQ); taosMemoryFreeClear(param); } @@ -1658,6 +1733,17 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh pInfo->interval = interval; pInfo->twAggSup = as; pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock; + if (pPhyNode->window.node.pLimit) { + SLimitNode* pLimit = (SLimitNode*)pPhyNode->window.node.pLimit; + pInfo->limited = true; + pInfo->limit = pLimit->limit + pLimit->offset; + } + if (pPhyNode->window.node.pSlimit) { + SLimitNode* pLimit = (SLimitNode*)pPhyNode->window.node.pSlimit; + pInfo->slimited = true; + pInfo->slimit = pLimit->limit + pLimit->offset; + pInfo->curGroupId = UINT64_MAX; + } if (pPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 713f12e229..854f3fc4c6 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -847,7 +847,6 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva : (pSelect->hasTimeLineFunc ? getRequireDataOrder(true, pSelect) : DATA_ORDER_LEVEL_IN_BLOCK); pWindow->node.resultDataOrder = pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_GLOBAL : getRequireDataOrder(true, pSelect); - pWindow->pTspk = nodesCloneNode(pInterval->pCol); if (NULL == pWindow->pTspk) { nodesDestroyNode((SNode*)pWindow); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 32721d8060..b2f71adbd7 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -368,7 +368,7 @@ static void scanPathOptSetGroupOrderScan(SScanLogicNode* pScan) { if (pScan->node.pParent && nodeType(pScan->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) { SAggLogicNode* pAgg = (SAggLogicNode*)pScan->node.pParent; - bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit); + bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit); if (withSlimit && isPartTableAgg(pAgg)) { pScan->groupOrderScan = pAgg->node.forceCreateNonBlockingOptr = true; } @@ -1546,11 +1546,33 @@ static bool planOptNodeListHasTbname(SNodeList* pKeys) { } static bool partTagsIsOptimizableNode(SLogicNode* pNode) { - return ((QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode) || - (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode) && NULL != ((SAggLogicNode*)pNode)->pGroupKeys && - NULL != ((SAggLogicNode*)pNode)->pAggFuncs)) && - 1 == LIST_LENGTH(pNode->pChildren) && - QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0))); + bool ret = 1 == LIST_LENGTH(pNode->pChildren) && + QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)); + if (!ret) return ret; + switch (nodeType(pNode)) { + case QUERY_NODE_LOGIC_PLAN_PARTITION: { + if (pNode->pParent && nodeType(pNode->pParent) == QUERY_NODE_LOGIC_PLAN_WINDOW) { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode->pParent; + if (pWindow->winType == WINDOW_TYPE_INTERVAL) { + // if interval has slimit, we push down partition node to scan, and scan will set groupOrderScan to true + // we want to skip groups of blocks after slimit 
satisfied + // if interval only has limit, we do not push down partition node to scan + // we want to get grouped output from partition node and make use of limit + // if no slimit and no limit, we push down partition node and groupOrderScan is false, cause we do not need + // group ordered output + if (!pWindow->node.pSlimit && pWindow->node.pLimit) ret = false; + } + } + } break; + case QUERY_NODE_LOGIC_PLAN_AGG: { + SAggLogicNode* pAgg = (SAggLogicNode*)pNode; + ret = pAgg->pGroupKeys && pAgg->pAggFuncs; + } break; + default: + ret = false; + break; + } + return ret; } static SNodeList* partTagsGetPartKeys(SLogicNode* pNode) { @@ -1691,6 +1713,8 @@ static int32_t partTagsOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub scanPathOptSetGroupOrderScan(pScan); pParent->hasGroupKeyOptimized = true; } + if (pNode->pParent->pSlimit) + pScan->groupOrderScan = true; NODES_CLEAR_LIST(pNode->pChildren); nodesDestroyNode((SNode*)pNode); @@ -2644,23 +2668,79 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } static bool pushDownLimitOptShouldBeOptimized(SLogicNode* pNode) { - if (NULL == pNode->pLimit || 1 != LIST_LENGTH(pNode->pChildren)) { + if ((NULL == pNode->pLimit && pNode->pSlimit == NULL) || 1 != LIST_LENGTH(pNode->pChildren)) { return false; } SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); - // push down to sort node - if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) { - // if we have pushed down, we skip it - if (pChild->pLimit) return false; - } else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) || QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pNode)) { - // push down to table scan node - // if pNode is sortNode, we skip push down limit info to table scan node - return false; - } + if (pChild->pLimit || pChild->pSlimit) return false; return true; } +static void swapLimit(SLogicNode* pParent, SLogicNode* pChild) { + pChild->pLimit = pParent->pLimit; + pParent->pLimit = NULL; +} + +static void cloneLimit(SLogicNode* pParent, SLogicNode* pChild) { + SLimitNode* pLimit = NULL; + if (pParent->pLimit) { + pChild->pLimit = nodesCloneNode(pParent->pLimit); + pLimit = (SLimitNode*)pChild->pLimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + } + + if (pParent->pSlimit) { + pChild->pSlimit = nodesCloneNode(pParent->pSlimit); + pLimit = (SLimitNode*)pChild->pSlimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + } +} + +static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo); +static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) { + switch (nodeType(pNodeLimitPushTo)) { + case QUERY_NODE_LOGIC_PLAN_WINDOW: { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNodeLimitPushTo; + if (pWindow->winType != WINDOW_TYPE_INTERVAL) break; + cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + return true; + } + case QUERY_NODE_LOGIC_PLAN_FILL: + case QUERY_NODE_LOGIC_PLAN_SORT: { + cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + SNode* pChild = NULL; + FOREACH(pChild, pNodeLimitPushTo->pChildren) { pushDownLimitHow(pNodeLimitPushTo, (SLogicNode*)pChild); } + return true; + } + case QUERY_NODE_LOGIC_PLAN_SCAN: + if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) { + swapLimit(pNodeWithLimit, pNodeLimitPushTo); + return true; + } + default: + break; + } + return false; +} + +static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) { + switch (nodeType(pNodeWithLimit)) { + case 
QUERY_NODE_LOGIC_PLAN_PROJECT: + case QUERY_NODE_LOGIC_PLAN_FILL: + return pushDownLimitTo(pNodeWithLimit, pNodeLimitPushTo); + case QUERY_NODE_LOGIC_PLAN_SORT: { + SSortLogicNode* pSort = (SSortLogicNode*)pNodeWithLimit; + if (sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys)) return pushDownLimitTo(pNodeWithLimit, pNodeLimitPushTo); + } + default: + break; + } + return false; +} + static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) { SLogicNode* pNode = optFindPossibleNode(pLogicSubplan->pNode, pushDownLimitOptShouldBeOptimized); if (NULL == pNode) { @@ -2669,17 +2749,9 @@ static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); nodesDestroyNode(pChild->pLimit); - if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) { - pChild->pLimit = nodesCloneNode(pNode->pLimit); - SLimitNode* pLimit = (SLimitNode*)pChild->pLimit; - pLimit->limit += pLimit->offset; - pLimit->offset = 0; - } else { - pChild->pLimit = pNode->pLimit; - pNode->pLimit = NULL; + if (pushDownLimitHow(pNode, pChild)) { + pCxt->optimized = true; } - pCxt->optimized = true; - return TSDB_CODE_SUCCESS; } @@ -2980,6 +3052,7 @@ static const SOptimizeRule optimizeRuleSet[] = { {.pName = "sortNonPriKeyOptimize", .optimizeFunc = sortNonPriKeyOptimize}, {.pName = "SortPrimaryKey", .optimizeFunc = sortPrimaryKeyOptimize}, {.pName = "SmaIndex", .optimizeFunc = smaIndexOptimize}, + {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}, {.pName = "PartitionTags", .optimizeFunc = partTagsOptimize}, {.pName = "MergeProjects", .optimizeFunc = mergeProjectsOptimize}, {.pName = "EliminateProject", .optimizeFunc = eliminateProjOptimize}, @@ -2988,7 +3061,6 @@ static const SOptimizeRule optimizeRuleSet[] = { {.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize}, {.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize}, {.pName = "TagScan", .optimizeFunc = tagScanOptimize}, - {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}, {.pName = "TableCountScan", .optimizeFunc = tableCountScanOptimize}, }; // clang-format on diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 84a486649e..3f6c73b4e5 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -498,6 +498,18 @@ static int32_t stbSplRewriteFromMergeNode(SMergeLogicNode* pMerge, SLogicNode* p } break; } + case QUERY_NODE_LOGIC_PLAN_WINDOW: { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode; + if (pMerge->node.pLimit) { + nodesDestroyNode(pMerge->node.pLimit); + pMerge->node.pLimit = NULL; + } + if (pMerge->node.pSlimit) { + nodesDestroyNode(pMerge->node.pSlimit); + pMerge->node.pSlimit = NULL; + } + break; + } default: break; } diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 9ec12197ec..dc72c67402 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -25,6 +25,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqShow.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 
7-tmq/subscribeStb0.py diff --git a/tests/script/tsim/query/r/explain_tsorder.result b/tests/script/tsim/query/r/explain_tsorder.result index b69a77ada5..95f8c38fbf 100644 --- a/tests/script/tsim/query/r/explain_tsorder.result +++ b/tests/script/tsim/query/r/explain_tsorder.result @@ -1603,58 +1603,58 @@ QUERY_PLAN: Time Range: [-9223372036854775808, taos> select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc; _wstart | last(ts) | avg(c2) | ================================================================================ - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | taos> select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc; _wstart | last(ts) | avg(c2) | ================================================================================ - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 
00:01:08.000 | 116.000000000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | taos> select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc; _wstart | first(ts) | avg(c2) | ================================================================================ - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | taos> select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc; _wstart | first(ts) | avg(c2) | ================================================================================ - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d; d | @@ 
-1792,35 +1792,35 @@ taos> select last(b) as d from (select last(ts) as b, avg(c2) as c from meters i taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc; _wstart | d | avg(c) | ================================================================================ - 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000 | - 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000 | - 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000 | - 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000 | - 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000 | - 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.600000000 | - 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | - 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | - 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | - 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | - 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | - 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000 | - 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000 | - 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000 | - 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000 | - 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000 | - 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.400000000 | - 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000 | - 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.800000000 | - 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000 | - 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000 | - 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.800000000 | - 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000 | - 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.600000000 | - 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000 | - 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000 | - 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | - 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | - 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000000000 | + 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000000000 | + 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000000000 | + 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000000000 | + 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000000000 | + 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.599999999999994 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.199999999999989 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000000011 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000000006 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000000000 | + 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000000000 | + 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000000000 | + 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000000000 | + 2022-05-17 
22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000000000 | + 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000000003 | + 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.399999999999999 | + 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000000001 | + 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.799999999999997 | + 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000000000 | + 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000000006 | + 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.799999999999997 | + 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000000003 | + 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.599999999999994 | + 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000000000 | + 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000000000 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000000000 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000000000 | taos> explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G; *************************** 1.row *************************** @@ -2673,51 +2673,51 @@ taos> select ts, c2 from d1 order by ts asc, c2 desc limit 5,5; taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc; _wstart | d | avg(c) | ================================================================================ - 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | - 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | - 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | - 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | - 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | - 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | - 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | - 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | - 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000 | - 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000 | - 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.600000000 | - 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000 | - 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000 | - 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.600000000 | - 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000 | - 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000 | - 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000 | - 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000 | - 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.800000000 | - 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000 | - 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000 | - 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000 | - 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.800000000 | - 2022-05-17 07:00:00.000 | 2022-05-17 
09:37:00.000 | 58.600000000 | - 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.400000000 | - 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000 | - 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000 | - 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000 | - 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000000000 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000000006 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000000000 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000000011 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000000000 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.199999999999989 | + 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000000000 | + 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000000000 | + 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.599999999999994 | + 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000000000 | + 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000000000 | + 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.599999999999994 | + 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000000000 | + 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000000003 | + 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000000000 | + 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000000000 | + 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.799999999999997 | + 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000000006 | + 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000000000 | + 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000000000 | + 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.799999999999997 | + 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000000001 | + 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.399999999999999 | + 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000000003 | + 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000000000 | + 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000000000 | + 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000000000 | taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2; _wstart | d | avg(c) | ================================================================================ - 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | - 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000000000 | taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2,6; _wstart | d | avg(c) | 
================================================================================
-               2022-05-19 04:00:00.000 |      2022-05-19 04:49:00.000 |             218.400000000 |
-               2022-05-15 05:00:00.000 |      2022-05-15 06:01:00.000 |             209.500000000 |
-               2022-05-18 18:00:00.000 |      2022-05-18 19:13:00.000 |             206.000000000 |
-               2022-05-19 09:00:00.000 |      2022-05-19 09:37:00.000 |             193.800000000 |
-               2022-05-15 10:00:00.000 |      2022-05-15 12:01:00.000 |             185.000000000 |
-               2022-05-19 14:00:00.000 |      2022-05-19 14:25:00.000 |             169.200000000 |
+               2022-05-19 04:00:00.000 |      2022-05-19 04:49:00.000 |       218.400000000000006 |
+               2022-05-15 05:00:00.000 |      2022-05-15 06:01:00.000 |       209.500000000000000 |
+               2022-05-18 18:00:00.000 |      2022-05-18 19:13:00.000 |       206.000000000000000 |
+               2022-05-19 09:00:00.000 |      2022-05-19 09:37:00.000 |       193.800000000000011 |
+               2022-05-15 10:00:00.000 |      2022-05-15 12:01:00.000 |       185.000000000000000 |
+               2022-05-19 14:00:00.000 |      2022-05-19 14:25:00.000 |       169.199999999999989 |

taos> select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 10;
 last(ts) | d |
diff --git a/tests/system-test/2-query/interval_limit_opt.py b/tests/system-test/2-query/interval_limit_opt.py
new file mode 100644
index 0000000000..fef6e9facd
--- /dev/null
+++ b/tests/system-test/2-query/interval_limit_opt.py
@@ -0,0 +1,266 @@
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+import math
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+# from tmqCommon import *
+
+class TDTestCase:
+    def __init__(self):
+        self.vgroups = 4
+        self.ctbNum = 10
+        self.rowsPerTbl = 10000
+        self.duraion = '1h'
+
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), False)
+
+    def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'):
+        if dropFlag == 1:
+            tsql.execute("drop database if exists %s"%(dbName))
+
+        tsql.execute("create database if not exists %s vgroups %d replica %d duration %s"%(dbName, vgroups, replica, duration))
+        tdLog.debug("complete to create database %s"%(dbName))
+        return
+
+    def create_stable(self,tsql, paraDict):
+        colString = tdCom.gen_column_type_str(colname_prefix=paraDict["colPrefix"], column_elm_list=paraDict["colSchema"])
+        tagString = tdCom.gen_tag_type_str(tagname_prefix=paraDict["tagPrefix"], tag_elm_list=paraDict["tagSchema"])
+        sqlString = f"create table if not exists %s.%s (%s) tags (%s)"%(paraDict["dbName"], paraDict["stbName"], colString, tagString)
+        tdLog.debug("%s"%(sqlString))
+        tsql.execute(sqlString)
+        return
+
+    def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1,ctbStartIdx=0):
+        for i in range(ctbNum):
+            sqlString = "create table %s.%s%d using %s.%s tags(%d, 'tb%d', 'tb%d', %d, %d, %d)" % \
+                (dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,(i+ctbStartIdx) % 5,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx)
+            tsql.execute(sqlString)
+
+        tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs,tsStep):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        for i in range(ctbNum):
+            rowsBatched = 0
+            sql += " %s%d values "%(ctbPrefix,i)
+            for j in range(rowsPerTbl):
+                if (i < ctbNum/2):
+                    sql += "(%d, %d, %d, 
%d,%d,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10, j%10, j%10)
+                else:
+                    sql += "(%d, %d, NULL, %d,NULL,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10)
+                rowsBatched += 1
+                if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsBatched = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s%d values " %(ctbPrefix,i)
+                    else:
+                        sql = "insert into "
+        if sql != pre_insert:
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareTestEnv(self):
+        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
+        paraDict = {'dbName':     'test',
+                    'dropFlag':   1,
+                    'vgroups':    2,
+                    'stbName':    'meters',
+                    'colPrefix':  'c',
+                    'tagPrefix':  't',
+                    'colSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'FLOAT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'smallint', 'count':1},{'type': 'tinyint', 'count':1},{'type': 'bool', 'count':1},{'type': 'binary', 'len':10, 'count':1},{'type': 'nchar', 'len':10, 'count':1}],
+                    'tagSchema':   [{'type': 'INT', 'count':1},{'type': 'nchar', 'len':20, 'count':1},{'type': 'binary', 'len':20, 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'smallint', 'count':1},{'type': 'DOUBLE', 'count':1}],
+                    'ctbPrefix':  't',
+                    'ctbStartIdx': 0,
+                    'ctbNum':     100,
+                    'rowsPerTbl': 10000,
+                    'batchNum':   3000,
+                    'startTs':    1537146000000,
+                    'tsStep':     600000}
+
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+        tdLog.info("create database")
+        self.create_database(tsql=tdSql, dbName=paraDict["dbName"], dropFlag=paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replicaVar, duration=self.duraion)
+
+        tdLog.info("create stb")
+        self.create_stable(tsql=tdSql, paraDict=paraDict)
+
+        tdLog.info("create child tables")
+        self.create_ctable(tsql=tdSql, dbName=paraDict["dbName"], \
+                stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],\
+                ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict["ctbStartIdx"])
+        self.insert_data(tsql=tdSql, dbName=paraDict["dbName"],\
+                ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],\
+                rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],\
+                startTs=paraDict["startTs"],tsStep=paraDict["tsStep"])
+        return
+
+    def check_first_rows(self, all_rows, limited_rows, offset: int = 0):
+        for i in range(0, len(limited_rows) - 1):
+            if limited_rows[i] != all_rows[i + offset]:
+                tdLog.info("row: %d, row in all: %s" % (i+offset+1, str(all_rows[i+offset])))
+                tdLog.info("row: %d, row in limited: %s" % (i+1, str(limited_rows[i])))
+                tdLog.exit("row data check failed")
+        tdLog.info("all rows are the same as query without limit..")
+
+    def query_and_check_with_slimit(self, sql: str, max_limit: int, step: int, offset: int = 0):
+        self.query_and_check_with_limit(sql, max_limit, step, offset, ' slimit ')
+
+    def query_and_check_with_limit(self, sql: str, max_limit: int, step: int, offset: int = 0, limit_str: str = ' limit '):
+        for limit in range(0, max_limit, step):
+            limited_sql = sql + limit_str + str(offset) + "," + str(limit)
+            tdLog.info("query with sql: %s " % (sql) + limit_str + " %d,%d" % (offset, limit))
+            all_rows = tdSql.getResult(sql)
+            limited_rows = tdSql.getResult(limited_sql)
+            tdLog.info("all rows: %d, limited rows: %d" % (len(all_rows), len(limited_rows)))
+            if limit_str == ' limit ':
+                if limit + offset <= len(all_rows) and len(limited_rows) != limit:
+                    
tdLog.exit("limited sql has less rows than limit value which is not right, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + elif limit + offset > len(all_rows) and offset < len(all_rows) and offset + len(limited_rows) != len(all_rows): + tdLog.exit("limited sql has less rows than all_rows which is not right, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + elif offset >= len(all_rows) and len(limited_rows) != 0: + tdLog.exit("limited rows should be zero, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + + self.check_first_rows(all_rows, limited_rows, offset) + + def test_interval_limit_asc(self, offset: int = 0): + sqls = ["select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1d) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1d) "] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 500, offset) + + def test_interval_limit_desc(self, offset: int = 0): + sqls = ["select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1d) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1d) "] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 500, offset) + + def test_interval_limit_offset(self): + for offset in range(0, 1000, 500): + self.test_interval_limit_asc(offset) + self.test_interval_limit_desc(offset) + self.test_interval_fill_limit(offset) + self.test_interval_order_by_limit(offset) + self.test_interval_partition_by_slimit(offset) + + def test_interval_fill_limit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1s) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1m) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1h) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= 
'2018-09-17 09:30:00.000' interval(1d) fill(linear)" + ] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 1000, offset) + + def test_interval_order_by_limit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by b", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), last(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by count(*), sum(c1), a", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a, count(*), sum(c1)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by b", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by count(*), sum(c1), a", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a, count(*), sum(c1)", + ] + for sql in sqls: + self.query_and_check_with_limit(sql, 6000, 2000, offset) + + def test_interval_partition_by_slimit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1m)", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1h)", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m)", + ] + for sql in sqls: + self.query_and_check_with_slimit(sql, 10, 2, offset) + + def test_interval_partition_by_slimit_limit(self): + sql = "select * from (select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts),c3 from meters " \ + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m) slimit 10 limit 2) order by c3 asc" + tdSql.query(sql) + tdSql.checkRows(20) + tdSql.checkData(0, 4, 0) + tdSql.checkData(1, 4, 0) + tdSql.checkData(2, 4, 1) + tdSql.checkData(3, 4, 
1) + tdSql.checkData(18, 4, 9) + tdSql.checkData(19, 4, 9) + + sql = "select * from (select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts),c3 from meters " \ + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m) slimit 2,2 limit 2) order by c3 asc" + tdSql.query(sql) + tdSql.checkRows(4) + tdSql.checkData(0, 4, 2) + tdSql.checkData(1, 4, 2) + tdSql.checkData(2, 4, 9) + tdSql.checkData(3, 4, 9) + + def run(self): + self.prepareTestEnv() + self.test_interval_limit_offset() + self.test_interval_partition_by_slimit_limit() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 87783a965082757fb358a5f4ceb8882f6303dfe6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 15:37:52 +0800 Subject: [PATCH 014/122] s3query/get: pull object to local --- source/dnode/vnode/src/inc/vndCos.h | 3 + .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 21 +++++- source/dnode/vnode/src/vnd/vnodeCos.c | 75 +++++++++++++++++++ 3 files changed, 96 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index b8510213d7..d4e19e9031 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -28,6 +28,9 @@ int32_t s3Init(); void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); +bool s3Exists(const char *object_name); +void s3Get(const char *object_name, const char *path); +void s3EvictCache(); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 89b7d019ae..96037ff6be 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -14,6 +14,7 @@ */ #include "tsdb.h" +#include "vndCos.h" // =============== PAGE-WISE FILE =============== int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) { @@ -34,9 +35,23 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->flag = flag; pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(pFD); - goto _exit; + const char *object_name = taosDirEntryBaseName((char *)path); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) { + s3EvictCache(); + s3Get(object_name, path); + + pFD->pFD = taosOpenFile(path, flag); + + if (pFD->pFD == NULL) { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } + } else { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } } pFD->szPage = szPage; pFD->pgno = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 1507df7074..696632fc6f 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -112,3 +112,78 @@ void s3DeleteObjects(const char *object_name[], int nobject) { cos_warn_log("delete objects failed\n"); } } + +bool s3Exists(const char *object_name) { + bool ret = false; + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; 
+ + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&object, object_name); + + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == COS_OBJECT_NON_EXIST) { + cos_warn_log("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + ret = true; + cos_warn_log("object: %.*s exist.\n", object.len, object.data); + } else { + cos_warn_log("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); + + return ret; +} + +void s3Get(const char *object_name, const char *path) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + int traffic_limit = 0; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + if (traffic_limit) { + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + + //下载对象 + cos_str_set(&file, path); + cos_str_set(&object, object_name); + s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); + if (cos_status_is_ok(s)) { + cos_warn_log("get object succeeded\n"); + } else { + cos_warn_log("get object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void s3EvictCache() {} From 65d8af19ed74f697ac0fef7fb007b6cf67a23f9e Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 16:35:35 +0800 Subject: [PATCH 015/122] s3: new api s3Size --- source/dnode/vnode/src/inc/vndCos.h | 4 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 3 +- source/dnode/vnode/src/vnd/vnodeCos.c | 45 ++++++++++++++++++- 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index d4e19e9031..6e0984c400 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -29,9 +29,9 @@ void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); -void s3Get(const char *object_name, const char *path); +bool s3Get(const char *object_name, const char *path); void s3EvictCache(); - +long s3Size(const char *object_name); #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 96037ff6be..872042d9d5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -36,7 +36,8 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { const char *object_name = taosDirEntryBaseName((char *)path); - if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) { + long s3_size = s3Size(object_name); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { s3EvictCache(); s3Get(object_name, path); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 
From 65d8af19ed74f697ac0fef7fb007b6cf67a23f9e Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Fri, 4 Aug 2023 16:35:35 +0800
Subject: [PATCH 015/122] s3: new api s3Size

---
 source/dnode/vnode/src/inc/vndCos.h | 4 +-
 .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 3 +-
 source/dnode/vnode/src/vnd/vnodeCos.c | 45 ++++++++++++++++++-
 3 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h
index d4e19e9031..6e0984c400 100644
--- a/source/dnode/vnode/src/inc/vndCos.h
+++ b/source/dnode/vnode/src/inc/vndCos.h
@@ -29,9 +29,9 @@ void s3CleanUp();
 void s3PutObjectFromFile(const char *file, const char *object);
 void s3DeleteObjects(const char *object_name[], int nobject);
 bool s3Exists(const char *object_name);
-void s3Get(const char *object_name, const char *path);
+bool s3Get(const char *object_name, const char *path);
 void s3EvictCache();
-
+long s3Size(const char *object_name);
 #ifdef __cplusplus
 }
 #endif
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index 96037ff6be..872042d9d5 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -36,7 +36,8 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p
   pFD->pFD = taosOpenFile(path, flag);
   if (pFD->pFD == NULL) {
     const char *object_name = taosDirEntryBaseName((char *)path);
-    if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) {
+    long s3_size = s3Size(object_name);
+    if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) {
       s3EvictCache();
       s3Get(object_name, path);
diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
index 696632fc6f..a7b166b6c7 100644
--- a/source/dnode/vnode/src/vnd/vnodeCos.c
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -147,7 +147,8 @@ bool s3Exists(const char *object_name) {
   return ret;
 }
 
-void s3Get(const char *object_name, const char *path) {
+bool s3Get(const char *object_name, const char *path) {
+  bool ret = false;
   cos_pool_t            *p = NULL;
   int                    is_cname = 0;
   cos_status_t          *s = NULL;
@@ -177,6 +178,7 @@ void s3Get(const char *object_name, const char *path) {
   cos_str_set(&object, object_name);
   s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers);
   if (cos_status_is_ok(s)) {
+    ret = true;
     cos_warn_log("get object succeeded\n");
   } else {
     cos_warn_log("get object failed\n");
@@ -184,6 +186,47 @@ void s3Get(const char *object_name, const char *path) {
 
   //destroy the memory pool
   cos_pool_destroy(p);
+
+  return ret;
 }
 
 void s3EvictCache() {}
+
+long s3Size(const char *object_name) {
+  long size = 0;
+
+  cos_pool_t            *p = NULL;
+  int                    is_cname = 0;
+  cos_status_t          *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t           bucket;
+  cos_string_t           object;
+  cos_table_t           *resp_headers = NULL;
+
+  //create the memory pool
+  cos_pool_create(&p, NULL);
+
+  //initialize the request options
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_str_set(&bucket, tsS3BucketName);
+
+  //get the object metadata
+  cos_str_set(&object, object_name);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  // print_headers(resp_headers);
+  if (cos_status_is_ok(s)) {
+    char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+    if (content_length_str != NULL) {
+      size = atol(content_length_str);
+    }
+    cos_warn_log("head object succeeded: %ld\n", size);
+  } else {
+    cos_warn_log("head object failed\n");
+  }
+
+  //destroy the memory pool
+  cos_pool_destroy(p);
+
+  return size;
+}
From 5154d0e1a3a23eb79b2446e02e428b36cdbdfccc Mon Sep 17 00:00:00 2001
From: Shungang Li
Date: Fri, 4 Aug 2023 17:24:35 +0800
Subject: [PATCH 016/122] fix: alter ttlChangeOnWrite note info

---
 source/dnode/mnode/impl/src/mndDnode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 1f566b14c7..234e81a670 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -41,7 +41,7 @@ static const char *offlineReason[] = {
     "timezone not match",
     "locale not match",
     "charset not match",
-    "ttl change on write not match"
+    "ttlChangeOnWrite not match",
     "unknown",
 };
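The one-line change in PATCH 016 above fixes more than the message text: in the original offlineReason array the entry "ttl change on write not match" had no trailing comma, so C's adjacent string-literal concatenation silently fused it with the following "unknown" into a single element, leaving the array one entry short. A standalone sketch of the pitfall, illustrative only and not code from the patch:

#include <stdio.h>

static const char *reasons[] = {
    "charset not match",
    "ttl change on write not match"  /* missing comma here ...       */
    "unknown",                       /* ... fuses these two literals */
};

int main(void) {
  /* Prints 2, not 3: one element fewer than intended. */
  printf("%zu\n", sizeof(reasons) / sizeof(reasons[0]));
  /* Prints "ttl change on write not matchunknown". */
  printf("%s\n", reasons[1]);
  return 0;
}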
"group.id", "group_id_2"); + tmq_conf_set(conf, "td.connect.user", "root"); + tmq_conf_set(conf, "td.connect.pass", "taosdata"); + tmq_conf_set(conf, "auto.offset.reset", "latest"); + tmq_conf_set(conf, "msg.with.table.name", "false"); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + + // 创建订阅 topics 列表 + tmq_list_t* topicList = tmq_list_new(); + tmq_list_append(topicList, "tp"); + + // 启动订阅 + tmq_subscribe(tmq, topicList); + tmq_list_destroy(topicList); + + TAOS_FIELD* fields = NULL; + int32_t numOfFields = 0; + int32_t precision = 0; + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 200; + + int32_t count = 0; + + tmq_topic_assignment* pAssign = NULL; + int32_t numOfAssign = 0; + + int32_t code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + +// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, 4); + tmq_free_assignment(pAssign); + + code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + + tmq_free_assignment(pAssign); + + code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + + while (1) { + printf("start to poll\n"); + TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout); + if (pRes) { + char buf[128]; + + const char* topicName = tmq_get_topic_name(pRes); +// const char* dbName = tmq_get_db_name(pRes); +// int32_t vgroupId = tmq_get_vgroup_id(pRes); +// +// printf("topic: %s\n", topicName); +// printf("db: %s\n", dbName); +// printf("vgroup id: %d\n", vgroupId); + + printSubResults(pRes, &totalRows); + + tmq_topic_assignment* pAssignTmp = NULL; + int32_t numOfAssignTmp = 0; + + code = tmq_get_topic_assignment(tmq, "tp", &pAssignTmp, &numOfAssignTmp); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssignTmp[i].vgId, pAssignTmp[i].currentOffset, pAssignTmp[i].begin, pAssignTmp[i].end); + } + if(numOfAssign != 0){ + int i = 0; + for(; i < numOfAssign; i++){ + if(pAssign[i].currentOffset != pAssignTmp[i].currentOffset){ + 
break; + } + } + if(i == numOfAssign){ + printf("all position is same\n"); + break; + } + tmq_free_assignment(pAssign); + } + numOfAssign = numOfAssignTmp; + pAssign = pAssignTmp; + + } else { +// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].currentOffset); +// tmq_offset_seek(tmq, "tp", pAssign[1].vgId, pAssign[1].currentOffset); +// tmq_commit_sync(tmq, pRes); + continue; + } + +// tmq_commit_sync(tmq, pRes); + if (pRes != NULL) { + taos_free_result(pRes); + // if ((++count) > 1) { + // break; + // } + } else { +// break; + } + +// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].begin); + } + + tmq_free_assignment(pAssign); + + code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign); + if (code != 0) { + printf("error occurs:%s\n", tmq_err2str(code)); + tmq_free_assignment(pAssign); + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + return; + } + + for(int i = 0; i < numOfAssign; i++){ + printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); + } + + tmq_consumer_close(tmq); + taos_close(pConn); + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} #pragma GCC diagnostic pop diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 247c1729a3..f47f193e17 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -116,6 +116,8 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); if (code != 0) { + dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed code:%s", pVnode->vgId, pMsg, tstrerror(code)); + if (terrno != 0) { code = terrno; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 4d433042ad..b02debd1a6 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -677,6 +677,9 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { tmsgSendRsp(&rsp); +// if(terrno == TSDB_CODE_REF_NOT_EXIST){ +// ASSERT(0); +// } return 0; } @@ -758,6 +761,9 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { tqDoSendDataRsp(&pMsg->info, &dataRsp, req.epoch, req.consumerId, TMQ_MSG_TYPE__WALINFO_RSP, sver, ever); tDeleteMqDataRsp(&dataRsp); +// if(terrno == TSDB_CODE_REF_NOT_EXIST){ +// ASSERT(0); +// } return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 743470aac8..ca1b7ea2f9 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -594,7 +594,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { } int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { - vTrace("vgId:%d, msg:%p in fetch queue is processing", pVnode->config.vgId, pMsg); + vTrace("vgId:%d, msg:%p in fetch queue is processing, terrno:%d", pVnode->config.vgId, pMsg, terrno); if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && !syncIsReadyForRead(pVnode->sync)) { From 36e841b6d0079f7d9c8b4b6bc2a7c653786a11c7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 4 Aug 2023 18:59:17 +0800 Subject: [PATCH 018/122] fix invalid free --- source/libs/stream/src/streamBackendRocksdb.c | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 8534f3b0a1..3c75607481 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -218,11 +218,11 @@ _EXIT: } void streamBackendCleanup(void* arg) { SBackendWrapper* pHandle = (SBackendWrapper*)arg; - RocksdbCfInst** pIter = (RocksdbCfInst**)taosHashIterate(pHandle->cfInst, NULL); + void* pIter = taosHashIterate(pHandle->cfInst, NULL); while (pIter != NULL) { - RocksdbCfInst* inst = *pIter; + RocksdbCfInst* inst = *(RocksdbCfInst**)pIter; destroyRocksdbCfInst(inst); - taosHashIterate(pHandle->cfInst, pIter); + pIter = taosHashIterate(pHandle->cfInst, pIter); } taosHashCleanup(pHandle->cfInst); From 4b137f1ca0e65c7d583d8515fb9aa8f4a0d817cc Mon Sep 17 00:00:00 2001 From: jiacy-jcy <714897623@qq.com> Date: Sun, 6 Aug 2023 23:32:54 +0800 Subject: [PATCH 019/122] fix: mktime on windows platform --- source/os/src/osTime.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 39d1de0437..a9965c57c2 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -368,7 +368,7 @@ int32_t taosGetTimeOfDay(struct timeval *tv) { time_t taosTime(time_t *t) { return time(t); } time_t taosMktime(struct tm *timep) { -#ifdef WINDOWS +#ifdef WINDOWS_STASH struct tm tm1 = {0}; LARGE_INTEGER t; FILETIME f; From 5f02e2ddfb811b6ed3218c28dd127dc1f92ed17a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 7 Aug 2023 09:31:29 +0800 Subject: [PATCH 020/122] fix:ref is not there --- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 1 + source/dnode/vnode/src/tq/tq.c | 6 ------ source/dnode/vnode/src/vnd/vnodeSvr.c | 2 +- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index f47f193e17..30051f72ed 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -114,6 +114,7 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO const STraceId *trace = &pMsg->info.traceId; dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg); + terrno = 0; int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); if (code != 0) { dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed code:%s", pVnode->vgId, pMsg, tstrerror(code)); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index b02debd1a6..4d433042ad 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -677,9 +677,6 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { tmsgSendRsp(&rsp); -// if(terrno == TSDB_CODE_REF_NOT_EXIST){ -// ASSERT(0); -// } return 0; } @@ -761,9 +758,6 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { tqDoSendDataRsp(&pMsg->info, &dataRsp, req.epoch, req.consumerId, TMQ_MSG_TYPE__WALINFO_RSP, sver, ever); tDeleteMqDataRsp(&dataRsp); -// if(terrno == TSDB_CODE_REF_NOT_EXIST){ -// ASSERT(0); -// } return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index ca1b7ea2f9..743470aac8 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -594,7 +594,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { } int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { - vTrace("vgId:%d, msg:%p in 
fetch queue is processing, terrno:%d", pVnode->config.vgId, pMsg, terrno); + vTrace("vgId:%d, msg:%p in fetch queue is processing", pVnode->config.vgId, pMsg); if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && !syncIsReadyForRead(pVnode->sync)) { From 9ecabb30f5566d2560ca1cf12267cdad36d41102 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 7 Aug 2023 10:27:44 +0800 Subject: [PATCH 021/122] fix:ref is not there --- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 2 +- source/dnode/vnode/src/tq/tq.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 30051f72ed..4ee9a60ae6 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -114,7 +114,7 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO const STraceId *trace = &pMsg->info.traceId; dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg); - terrno = 0; +// terrno = 0; int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); if (code != 0) { dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed code:%s", pVnode->vgId, pMsg, tstrerror(code)); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 4d433042ad..0371e979cc 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -646,7 +646,8 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)data, len); if (tDecodeMqVgOffset(&decoder, &vgOffset) < 0) { - return TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; } tDecoderClear(&decoder); @@ -654,19 +655,22 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { STqOffset* pOffset = &vgOffset.offset; STqOffset* pSavedOffset = tqOffsetRead(pTq->pOffsetStore, pOffset->subKey); if (pSavedOffset == NULL) { - return TSDB_CODE_TMQ_NO_COMMITTED; + terrno = TSDB_CODE_TMQ_NO_COMMITTED; + return terrno; } vgOffset.offset = *pSavedOffset; int32_t code = 0; tEncodeSize(tEncodeMqVgOffset, &vgOffset, len, code); if (code < 0) { - return TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_INVALID_PARA; + return terrno; } void* buf = rpcMallocCont(len); if (buf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; } SEncoder encoder; tEncoderInit(&encoder, buf, len); From 5daf38c0d56a3be6e25e67807c9218a5d28ed78b Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 7 Aug 2023 11:29:54 +0800 Subject: [PATCH 022/122] fix:ref is not there --- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 4ee9a60ae6..44a27fb791 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -114,12 +114,10 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO const STraceId *trace = &pMsg->info.traceId; dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg); -// terrno = 0; + terrno = 0; int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); if (code != 0) { - dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed code:%s", pVnode->vgId, pMsg, tstrerror(code)); - - if (terrno != 0) { + 
if (code == -1 && terrno != 0) { code = terrno; } From 773a9454d513f40ee2820a34cb0958ec4508518a Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 27 Jul 2023 09:07:13 +0800 Subject: [PATCH 023/122] enhance: subquery can use expr primary key +/- value as primary key --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..554dc7cce8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -821,7 +821,19 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { FUNCTION_TYPE_IROWTS == pFunc->funcType) { return true; } - } + } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; + } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } From 3e7187b2229f5705429446a0d5d3e90f4a33258c Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:17:18 +0800 Subject: [PATCH 024/122] fix: add test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_pk.sim | 42 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/script/tsim/query/join_pk.sim diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 586425ec1d..7a1e8d61c8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -954,6 +954,7 @@ ,,n,script,./test.sh -f tsim/query/udfpy.sim ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim +,,y,script,./test.sh -f tsim/query/join_pk.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim new file mode 100644 index 0000000000..66bb20da24 --- /dev/null +++ b/tests/script/tsim/query/join_pk.sim @@ -0,0 +1,42 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1) +sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql select * from (select _wstart - 1s as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +if $rows != 3 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi + +if $data21 != 1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi + +if $data13 != 1 then + return -1 +endi +if $data23 != 1 then + return -1 +endi +sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, 
count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + From 6cccc155eb547336325f04c2acf611cef399e256 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 7 Aug 2023 13:39:30 +0800 Subject: [PATCH 025/122] enhance: enhance test case --- tests/script/tsim/query/join_pk.sim | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim index 66bb20da24..da5c13e9c0 100644 --- a/tests/script/tsim/query/join_pk.sim +++ b/tests/script/tsim/query/join_pk.sim @@ -38,5 +38,18 @@ sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval( sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +sql create table sst(ts timestamp, ts2 timestamp, f int) tags(t int); +sql insert into sct1 using sst tags(1) values('2023-08-07 13:30:56', '2023-08-07 13:30:56', 0)('2023-08-07 13:30:57', '2023-08-07 13:30:57', 1) +sql insert into sct2 using sst tags(2) values('2023-08-07 13:30:58', '2023-08-07 13:30:58', 2)('2023-08-07 13:30:59', '2023-08-07 13:30:59', 3) +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts-1s as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 4 then + return -1 +endi +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 3 then + return -1 +endi +sql_error select * from (select ts2 - 1s as jts from sst) as t1 inner join (select ts2 as jts from sst) as t2 on t1.jts = t2.jts + #system sh/exec.sh -n dnode1 -s stop -x SIGINT From fac7e521e957ae1aaa279af48c0acd04c646817b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 15:59:37 +0800 Subject: [PATCH 026/122] s3/evict: fetch atime from stat file --- include/os/osFile.h | 2 +- source/dnode/mgmt/mgmt_mnode/src/mmFile.c | 4 +- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 2 +- source/dnode/mgmt/node_util/src/dmEps.c | 6 +- source/dnode/mgmt/node_util/src/dmFile.c | 2 +- source/dnode/vnode/src/inc/vndCos.h | 3 +- source/dnode/vnode/src/tq/tqOffsetSnapshot.c | 2 +- source/dnode/vnode/src/tsdb/tsdbFS.c | 10 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 4 +- source/dnode/vnode/src/vnd/vnodeCos.c | 66 +++++- source/libs/function/src/udfd.c | 8 +- source/libs/index/src/indexFstFile.c | 4 +- source/libs/parser/src/parTranslater.c | 207 +++++++++--------- source/libs/sync/src/syncRaftStore.c | 2 +- source/libs/wal/src/walMeta.c | 12 +- source/os/src/osFile.c | 28 ++- source/util/src/tlog.c | 60 ++--- tools/shell/src/shellEngine.c | 100 ++++----- utils/test/c/tmqDemo.c | 3 +- 19 files changed, 300 insertions(+), 225 deletions(-) diff --git a/include/os/osFile.h b/include/os/osFile.h index 0e93002706..da1f8f8b57 100644 --- a/include/os/osFile.h +++ b/include/os/osFile.h @@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile); int32_t taosUmaskFile(int32_t maskVal); -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime); +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime); int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno); int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime); bool 
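/* Illustrative usage of the prototype change above (a sketch, not part of
   the patch): the new fourth out-parameter follows the same convention as
   size and mtime and may be NULL when the access time is not needed.

     int64_t size = 0;
     int32_t mtime = 0, atime = 0;
     if (taosStatFile("/path/to/some.data", &size, &mtime, &atime) == 0) {
       // size and mtime behave as before; atime is new and feeds the
       // atime-based cache eviction added in vnodeCos.c below
     }

   Note that the POSIX implementation in this patch's osFile.c hunk fills
   *atime from st_mtime, so the value is a modification-time approximation
   rather than the true st_atime. */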
taosCheckExistFile(const char *pathname); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index cb0849f4b9..64e18ef06d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) { if (code < 0) return -1; tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code); if (code < 0) return -1; - if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){ + if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) { pOption->numOfReplicas++; } } @@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("mnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index da7f4d4a56..ed32e75d18 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t SWrapperCfg *pCfgs = NULL; snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("vnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 1564a09035..88f6b5da40 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) { goto _OVER; } - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; @@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) { } void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) { - if(!pData->validMnodeEps) return; + if (!pData->validMnodeEps) return; dmGetMnodeEpSet(pData, pEpSet); dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse); for (int32_t i = 0; i < pEpSet->numOfEps; ++i) { @@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dDebug("dnode file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index fb05f08c0c..c81efddcc1 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index 6e0984c400..f6db7f096e 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -30,8 +30,9 @@ void s3PutObjectFromFile(const char *file, const 
char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); -void s3EvictCache(); +void s3EvictCache(const char *path, long object_size); long s3Size(const char *object_name); + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c index a4428aed43..6a66da30c6 100644 --- a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -60,7 +60,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { } int64_t sz = 0; - if (taosStatFile(fname, &sz, NULL) < 0) { + if (taosStatFile(fname, &sz, NULL, NULL) < 0) { taosCloseFile(&pFile); taosMemoryFree(fname); return -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index ec116c717e..c0c74d6b87 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -176,7 +176,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // SDelFile if (pTsdb->fs.pDelFile) { tsdbDelFileName(pTsdb, pTsdb->fs.pDelFile, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -195,7 +195,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // head ========= tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -206,7 +206,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // data ========= tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -221,7 +221,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // sma ============= tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -237,7 +237,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // stt =========== for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 872042d9d5..4d3b53bc5a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -38,7 +38,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p const char *object_name = taosDirEntryBaseName((char *)path); long s3_size = s3Size(object_name); if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { - s3EvictCache(); + s3EvictCache(path, s3_size); s3Get(object_name, path); pFD->pFD = taosOpenFile(path, flag); @@ -66,7 +66,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p // not check file size when reading data files. 
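    /* Flow summary of the S3-backed branch above (a reading of this hunk,
       not new behavior): for a ".data" file that exists in the object
       store, tsdbOpenFile() now
         1. looks up the remote size            -> s3Size(object_name)
         2. frees local disk space if required  -> s3EvictCache(path, s3_size)
         3. downloads the object into the cache -> s3Get(object_name, path)
       and only then opens it like any local file. The read path below skips
       the local stat, since for cached objects the authoritative size lives
       in S3 rather than on local disk. */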
if (flag != TD_FILE_READ) { - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + if (taosStatFile(path, &pFD->szFile, NULL, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); taosCloseFile(&pFD->pFD); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a7b166b6c7..bac38f7c35 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -190,7 +190,71 @@ bool s3Get(const char *object_name, const char *path) { return ret; } -void s3EvictCache() {} +typedef struct { + int64_t size; + int32_t atime; + char name[TSDB_FILENAME_LEN]; +} SEvictFile; + +static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { + SEvictFile *lhs = (SEvictFile *)pLeft; + SEvictFile *rhs = (SEvictFile *)pRight; + return lhs->atime < rhs->atime ? -1 : 1; +} + +void s3EvictCache(const char *path, long object_size) { + SDiskSize disk_size = {0}; + if (taosGetDiskSize((char *)path, &disk_size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to get disk:%s size since %s", path, terrstr()); + return; + } + + if (object_size >= disk_size.avail + 1 << 30) { + // evict too old files + // 1, list data files' atime under dir(path) + char dir_name[TSDB_FILENAME_LEN] = "\0"; + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + tdbDirPtr pDir = taosOpenDir(dir_name); + if (pDir == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to open %s since %s", dir_name, terrstr()); + } + SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile)); + tdbDirEntryPtr pDirEntry; + while ((pDirEntry = taosReadDir(pDir)) != NULL) { + char *name = taosGetDirEntryName(pDirEntry); + if (!strncmp(name + strlen(name) - 5, ".data", 5)) { + SEvictFile e_file = {0}; + + tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); + taosStatFile(name, &e_file.size, NULL, &e_file.atime); + + taosArrayPush(evict_files, &e_file); + } + } + taosCloseDir(&pDir); + + // 2, sort by atime + taosArraySort(evict_files, evictFileCompareAsce); + + // 3, remove files ascendingly until we get enough object_size space + long evict_size = 0; + size_t ef_size = TARRAY_SIZE(evict_files); + for (size_t i = 0; i < ef_size; ++i) { + SEvictFile *evict_file = taosArrayGet(evict_files, i); + taosRemoveFile(evict_file->name); + evict_size += evict_file->size; + if (evict_size >= object_size) { + break; + } + } + + taosArrayDestroy(evict_files); + } +} long s3Size(const char *object_name) { long size = 0; diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 7371017111..575bce09bb 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -378,9 +378,9 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) { "pyUdfDestroy", "pyUdfScalarProc", "pyUdfAggStart", "pyUdfAggFinish", "pyUdfAggProc", "pyUdfAggMerge"}; void **funcs[UDFD_MAX_PLUGIN_FUNCS] = { - (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, - (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, - (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; + (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, + (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, + (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; int32_t 
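  /* Gloss on the two arrays above (a reading aid, not a behavior change):
     funcName lists the Python plugin's exported symbol names and funcs the
     matching slots in SUdfScriptPlugin, so udfdLoadSharedLib() can bind the
     two arrays positionally, presumably via a dlopen/dlsym-style loop; the
     loader itself is not part of this hunk. */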
err = udfdLoadSharedLib(plugin->libPath, &plugin->lib, funcName, funcs, UDFD_MAX_PLUGIN_FUNCS); if (err != 0) { fnError("can not load python plugin. lib path %s", plugin->libPath); @@ -848,7 +848,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) { char path[PATH_MAX] = {0}; udfdGetFuncBodyPath(udf, path); - bool fileExist = !(taosStatFile(path, NULL, NULL) < 0); + bool fileExist = !(taosStatFile(path, NULL, NULL, NULL) < 0); if (fileExist) { strncpy(udf->path, path, PATH_MAX); fnInfo("udfd func body file. reuse existing file %s", path); diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index e18d0bbad3..43f15f5196 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -162,7 +162,7 @@ static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) { return ctx->offset; } else { int64_t file_size = 0; - taosStatFile(ctx->file.buf, &file_size, NULL); + taosStatFile(ctx->file.buf, &file_size, NULL, NULL); return (int)file_size; } } @@ -199,7 +199,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int code = taosFtruncateFile(ctx->file.pFile, 0); UNUSED(code); - code = taosStatFile(path, &ctx->file.size, NULL); + code = taosStatFile(path, &ctx->file.size, NULL, NULL); UNUSED(code); ctx->file.wBufOffset = 0; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..6845496a03 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -278,10 +278,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +773,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + 
(TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +793,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +803,7 @@ static bool isTimeLineAlignedQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -1394,7 +1396,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1596,9 +1598,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1636,7 +1640,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1706,8 +1711,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2296,7 +2301,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || 
pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2391,14 +2397,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2657,10 +2663,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2699,11 +2705,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3110,7 +3117,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3576,7 +3584,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3671,15 +3678,15 @@ static int32_t 
translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3943,9 +3950,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5725,7 +5732,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5853,7 +5859,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5867,13 +5873,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? 
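  /* Defaults made explicit by this reformat: when the SMA index options omit
     OFFSET or SLIDING, the offset falls back to 0 and the sliding window
     falls back to the interval itself (with the interval's unit), i.e. one
     non-overlapping window per interval. */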
((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6041,7 +6049,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6096,11 +6104,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6110,18 +6119,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6129,13 +6138,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if 
(code) { taosMemoryFree(pMeta); @@ -6144,7 +6153,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6542,7 +6551,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6895,28 +6905,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6928,8 +6938,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6967,14 +6977,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7069,7 +7079,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return 
code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7077,16 +7087,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7121,7 +7131,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7152,7 +7161,7 @@ static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* static int32_t readFromFile(char* pName, int32_t* len, char** buf) { int64_t filesize = 0; - if (taosStatFile(pName, &filesize, NULL) < 0) { + if (taosStatFile(pName, &filesize, NULL, NULL) < 0) { return TAOS_SYSTEM_ERROR(errno); } @@ -7246,7 +7255,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7806,7 +7815,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7829,9 +7839,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7843,13 +7852,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + 
SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -7987,8 +7998,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index bd15567c87..051106b99d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -42,7 +42,7 @@ int32_t raftStoreReadFile(SSyncNode *pNode) { const char *file = pNode->raftStorePath; SRaftStore *pStore = &pNode->raftStore; - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { sInfo("vgId:%d, raft store file:%s not exist, use default value", pNode->vgId, file); pStore->currentTerm = 0; pStore->voteFor.addr = 0; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 01d23a7e96..2acdd975e5 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -53,7 +53,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { @@ -304,7 +304,7 @@ int walRepairLogFileTs(SWal* pWal, bool* updateMeta) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int32_t mtime = 0; - if (taosStatFile(fnameStr, NULL, &mtime) < 0) { + if (taosStatFile(fnameStr, NULL, &mtime, NULL) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, failed to stat file due to %s, file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); return -1; @@ -353,7 +353,7 @@ int walTrimIdxFile(SWal* pWal, int32_t fileIdx) { walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); @@ -436,7 +436,7 @@ int walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); - int32_t code = taosStatFile(fnameStr, &fileSize, NULL); + int32_t code = taosStatFile(fnameStr, &fileSize, NULL, NULL); if (code < 0) { terrno = 
TAOS_SYSTEM_ERROR(errno); wError("failed to stat file since %s. file:%s", terrstr(), fnameStr); @@ -522,7 +522,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL) < 0 && errno != ENOENT) { + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) < 0 && errno != ENOENT) { wError("vgId:%d, failed to stat file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -935,7 +935,7 @@ int walLoadMeta(SWal* pWal) { walBuildMetaName(pWal, metaVer, fnameStr); // read metafile int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); if (fileSize == 0) { (void)taosRemoveFile(fnameStr); wDebug("vgId:%d, wal find empty meta ver %d", pWal->cfg.vgId, metaVer); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index dd670595f0..c4309b2c55 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -191,7 +191,7 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { #endif } -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) { #ifdef WINDOWS struct _stati64 fileStat; int32_t code = _stati64(path, &fileStat); @@ -211,6 +211,10 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { *mtime = fileStat.st_mtime; } + if (atime != NULL) { + *atime = fileStat.st_mtime; + } + return 0; } int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { @@ -540,7 +544,7 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { #ifdef WINDOWS struct __stat64 fileStat; - int32_t code = _fstat64(pFile->fd, &fileStat); + int32_t code = _fstat64(pFile->fd, &fileStat); #else struct stat fileStat; int32_t code = fstat(pFile->fd, &fileStat); @@ -897,17 +901,17 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { goto cmp_end; } - dstFp = gzdopen(pFile->fd, "wb6f"); - if (dstFp == NULL) { - ret = -3; - taosCloseFile(&pFile); - goto cmp_end; - } + dstFp = gzdopen(pFile->fd, "wb6f"); + if (dstFp == NULL) { + ret = -3; + taosCloseFile(&pFile); + goto cmp_end; + } - while (!feof(pSrcFile->fp)) { - len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); - (void)gzwrite(dstFp, data, len); - } + while (!feof(pSrcFile->fp)) { + len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); + (void)gzwrite(dstFp, data, len); + } cmp_end: if (pSrcFile) { diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index de7ad848ed..4a15b5b976 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -17,8 +17,8 @@ #include "tlog.h" #include "os.h" #include "tconfig.h" -#include "tjson.h" #include "tglobal.h" +#include "tjson.h" #define LOG_MAX_LINE_SIZE (10024) #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) @@ -74,12 +74,12 @@ static SLogObj tsLogObj = {.fileNum = 1}; static int64_t tsAsyncLogLostLines = 0; static int32_t tsDaylightActive; /* Currently in daylight saving time. 
*/ -bool tsLogEmbedded = 0; -bool tsAsyncLog = true; +bool tsLogEmbedded = 0; +bool tsAsyncLog = true; #ifdef ASSERT_NOT_CORE -bool tsAssert = false; +bool tsAssert = false; #else -bool tsAssert = true; +bool tsAssert = true; #endif int32_t tsNumOfLogLines = 10000000; int32_t tsLogKeepDays = 0; @@ -160,7 +160,7 @@ int32_t taosInitSlowLog() { tsLogObj.slowHandle = taosLogBuffNew(LOG_SLOW_BUF_SIZE); if (tsLogObj.slowHandle == NULL) return -1; - + taosUmaskFile(0); tsLogObj.slowHandle->pFile = taosOpenFile(fullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (tsLogObj.slowHandle->pFile == NULL) { @@ -403,13 +403,13 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcpy(name, fn); strcat(name, ".0"); } - bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime) >= 0; + bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime, NULL) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime) >= 0; + bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime, NULL) >= 0; // if none of the log files exist, open 0, if both exists, open the old one if (!log0Exist && !log1Exist) { @@ -576,7 +576,7 @@ void taosPrintSlowLog(const char *format, ...) { } else { taosWriteFile(tsLogObj.slowHandle->pFile, buffer, len); } - + taosMemoryFree(buffer); } @@ -769,12 +769,12 @@ static void taosWriteLog(SLogBuff *pLogBuf) { static void *taosAsyncOutputLog(void *param) { SLogBuff *pLogBuf = (SLogBuff *)tsLogObj.logHandle; SLogBuff *pSlowBuf = (SLogBuff *)tsLogObj.slowHandle; - + setThreadName("log"); int32_t count = 0; int32_t updateCron = 0; int32_t writeInterval = 0; - + while (1) { writeInterval = TMIN(pLogBuf->writeInterval, pSlowBuf->writeInterval); count += writeInterval; @@ -834,12 +834,12 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char return true; } -void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo) { +void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; - char filepath[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; + char filepath[PATH_MAX] = {0}; + TdFilePtr pFile = NULL; if (pMsg && msgLen > 0) { snprintf(filepath, sizeof(filepath), "%s%s.%sCrashLog", tsLogDir, TD_DIRSEP, nodeType); @@ -856,16 +856,16 @@ void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, vo int64_t writeSize = taosWriteFile(pFile, &msgLen, sizeof(msgLen)); if (sizeof(msgLen) != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, writeSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, writeSize, sizeof(msgLen), terrstr()); goto _return; } writeSize = taosWriteFile(pFile, pMsg, msgLen); if (msgLen != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, writeSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, writeSize, msgLen, terrstr()); goto _return; } @@ -883,7 +883,7 @@ _return: taosPrintTrace(flags, level, dflag, 4); #elif !defined(WINDOWS) 
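  /* On-disk framing used by taosLogCrashInfo()/taosReadCrashInfo() in this
     file (as read from the hunks above; the format is otherwise
     undocumented): each crash record is appended under taosLockFile() as a
     length-prefixed blob,

         [ int64_t msgLen ][ msgLen bytes of message ]

     which lets the reader walk the file record by record and truncate it
     through taosReleaseCrashLogFile() once every record has been consumed. */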
taosPrintLog(flags, level, dflag, "sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid, - taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); + taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); taosPrintTrace(flags, level, dflag, 3); #else taosPrintTrace(flags, level, dflag, 8); @@ -892,17 +892,17 @@ _return: taosMemoryFree(pMsg); } -void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd) { +void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; TdFilePtr pFile = NULL; bool truncateFile = false; - char* buf = NULL; + char *buf = NULL; if (NULL == *pFd) { int64_t filesize = 0; - if (taosStatFile(filepath, &filesize, NULL) < 0) { + if (taosStatFile(filepath, &filesize, NULL, NULL) < 0) { if (ENOENT == errno) { return; } @@ -916,7 +916,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* return; } - pFile = taosOpenFile(filepath, TD_FILE_READ|TD_FILE_WRITE); + pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { if (ENOENT == errno) { return; @@ -926,7 +926,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to open file:%s since %s", filepath, terrstr()); return; } - + taosLockFile(pFile); } else { pFile = *pFd; @@ -937,8 +937,8 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* if (sizeof(msgLen) != readSize) { truncateFile = true; if (readSize < 0) { - taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, readSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, readSize, sizeof(msgLen), terrstr()); } goto _return; } @@ -948,12 +948,12 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to malloc buf, size:%" PRId64, msgLen); goto _return; } - + readSize = taosReadFile(pFile, buf, msgLen); if (msgLen != readSize) { truncateFile = true; - taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, readSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", filepath, + pFile, readSize, msgLen, terrstr()); goto _return; } @@ -981,7 +981,7 @@ void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile) { if (truncateFile) { taosFtruncateFile(pFile, 0); } - + taosUnLockFile(pFile); taosCloseFile(&pFile); } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e9dd067ac4..860622ea18 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -18,9 +18,9 @@ #define _GNU_SOURCE #define _XOPEN_SOURCE #define _DEFAULT_SOURCE -#include "shellInt.h" -#include "shellAuto.h" #include "geosWrapper.h" +#include "shellAuto.h" +#include "shellInt.h" static bool shellIsEmptyCommand(const char *cmd); static int32_t shellRunSingleCommand(char *command); @@ -41,9 +41,9 @@ static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); static void shellGetGrantInfo(); -static void shellCleanup(void *arg); -static void *shellCancelHandler(void *arg); -static void *shellThreadLoop(void *arg); +static void 
shellCleanup(void *arg); +static void *shellCancelHandler(void *arg); +static void *shellThreadLoop(void *arg); bool shellIsEmptyCommand(const char *cmd) { for (char c = *cmd++; c != 0; c = *cmd++) { @@ -66,7 +66,7 @@ int32_t shellRunSingleCommand(char *command) { if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-result" - system("clear"); + system("clear"); #pragma GCC diagnostic pop return 0; } @@ -142,8 +142,8 @@ int32_t shellRunCommand(char *command, bool recordHistory) { return 0; } - // add help or help; - if(strncasecmp(command, "help;", 5) == 0) { + // add help or help; + if (strncasecmp(command, "help;", 5) == 0) { showHelp(); return 0; } @@ -223,14 +223,14 @@ void shellRunSingleCommandImp(char *command) { } // pre string - char * pre = "Query OK"; + char *pre = "Query OK"; if (shellRegexMatch(command, "^\\s*delete\\s*from\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Delete OK"; - } else if(shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Insert OK"; - } else if(shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Create OK"; - } else if(shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Drop OK"; } @@ -295,7 +295,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } - size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); + size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); if (precision == TSDB_TIME_PRECISION_NANO) { sprintf(buf + pos, ".%09d", ms); @@ -387,22 +387,20 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - case TSDB_DATA_TYPE_JSON: - { - int32_t bufIndex = 0; - for (int32_t i = 0; i < length; i++) { + case TSDB_DATA_TYPE_JSON: { + int32_t bufIndex = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { buf[bufIndex] = val[i]; bufIndex++; - if (val[i] == '\"') { - buf[bufIndex] = val[i]; - bufIndex++; - } } - buf[bufIndex] = 0; - - taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); } - break; + buf[bufIndex] = 0; + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_GEOMETRY: shellDumpHexValue(buf, val, length); taosFprintfFile(pFile, "%s", buf); @@ -535,12 +533,10 @@ void shellPrintString(const char *str, int32_t width) { if (width == 0) { printf("%s", str); - } - else if (len > width) { + } else if (len > width) { if (width <= 3) { printf("%.*s.", width - 1, str); - } - else { + } else { printf("%.*s...", width - 3, str); } } else { @@ -549,7 +545,7 @@ void shellPrintString(const char *str, int32_t width) { } void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) { - if (length == 0) { //empty value + if (length == 0) { // empty value shellPrintString("", width); return; } @@ -565,7 +561,7 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) char *outputWKT = NULL; code = doAsText(val, length, &outputWKT); if (code != TSDB_CODE_SUCCESS) { - 
shellPrintString(getThreadLocalGeosCtx()->errMsg, width); //should NOT happen + shellPrintString(getThreadLocalGeosCtx()->errMsg, width); // should NOT happen return; } @@ -612,27 +608,26 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t break; case TSDB_DATA_TYPE_FLOAT: if (tsEnableScience) { - printf("%*.7e",width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.7f", width, GET_FLOAT_VAL(val)); if (n > SHELL_FLOAT_WIDTH) { - - printf("%*.7e", width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { - printf("%s", buf); + printf("%s", buf); } } break; case TSDB_DATA_TYPE_DOUBLE: if (tsEnableScience) { - snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width,GET_DOUBLE_VAL(val)); + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width, GET_DOUBLE_VAL(val)); printf("%s", buf); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15f", width, GET_DOUBLE_VAL(val)); if (n > SHELL_DOUBLE_WIDTH) { - printf("%*.15e", width, GET_DOUBLE_VAL(val)); + printf("%*.15e", width, GET_DOUBLE_VAL(val)); } else { - printf("%*s", width,buf); + printf("%*s", width, buf); } } break; @@ -905,7 +900,7 @@ void shellReadHistory() { TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) return; - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); int32_t read_size = 0; while ((read_size = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { line[read_size - 1] = '\0'; @@ -922,8 +917,8 @@ void shellReadHistory() { taosMemoryFreeClear(line); taosCloseFile(&pFile); int64_t file_size; - if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); + if (taosStatFile(pHistory->file, &file_size, NULL, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); if (pFile == NULL) return; int32_t endIndex = pHistory->hstart; if (endIndex != 0) { @@ -945,7 +940,7 @@ void shellReadHistory() { void shellWriteHistory() { SShellHistory *pHistory = &shell.history; if (pHistory->hend == pHistory->hstart) return; - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); if (pFile == NULL) return; for (int32_t i = pHistory->hstart; i != pHistory->hend;) { @@ -991,7 +986,7 @@ void shellSourceFile(const char *file) { tstrncpy(fullname, file, PATH_MAX); } - sprintf(sourceFileCommand, "source %s;",fullname); + sprintf(sourceFileCommand, "source %s;", fullname); shellRecordCommandToHistory(sourceFileCommand); TdFilePtr pFile = taosOpenFile(fullname, TD_FILE_READ | TD_FILE_STREAM); @@ -1001,7 +996,7 @@ void shellSourceFile(const char *file) { return; } - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); while ((read_len = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; line[--read_len] = '\0'; @@ -1044,7 +1039,8 @@ void shellGetGrantInfo() { int32_t code = taos_errno(tres); if (code != TSDB_CODE_SUCCESS) { - if (code != 
TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && code != TSDB_CODE_PAR_PERMISSION_DENIED) { + if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && + code != TSDB_CODE_PAR_PERMISSION_DENIED) { fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\r\n\r\n", code, taos_errstr(tres)); } return; @@ -1080,7 +1076,8 @@ void shellGetGrantInfo() { } else if (strcmp(expiretime, "unlimited") == 0) { fprintf(stdout, "Server is Enterprise %s Edition, %s and will never expire.\r\n", serverVersion, sinfo); } else { - fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, + expiretime); } taos_free_result(tres); @@ -1123,9 +1120,9 @@ void *shellCancelHandler(void *arg) { #ifdef WEBSOCKET } #endif - #ifdef WINDOWS +#ifdef WINDOWS printf("\n%s", shell.info.promptHeader); - #endif +#endif } return NULL; @@ -1165,8 +1162,7 @@ void *shellThreadLoop(void *arg) { } int32_t shellExecute() { - printf(shell.info.clientVersion, shell.info.cusName, - taos_get_client_info(), shell.info.cusName); + printf(shell.info.clientVersion, shell.info.cusName, taos_get_client_info(), shell.info.cusName); fflush(stdout); SShellArgs *pArgs = &shell.args; @@ -1233,13 +1229,13 @@ int32_t shellExecute() { taosSetSignal(SIGTERM, shellQueryInterruptHandler); taosSetSignal(SIGHUP, shellQueryInterruptHandler); taosSetSignal(SIGINT, shellQueryInterruptHandler); - + #ifdef WEBSOCKET if (!shell.args.restful && !shell.args.cloud) { #endif #ifndef WINDOWS printfIntroduction(); -#endif +#endif shellGetGrantInfo(); #ifdef WEBSOCKET } diff --git a/utils/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c index ce069c2b05..64f536433e 100644 --- a/utils/test/c/tmqDemo.c +++ b/utils/test/c/tmqDemo.c @@ -221,7 +221,7 @@ int64_t getDirectorySize(char* dir) { totalSize += subDirSize; } else if (0 == strcmp(strchr(fileName, '.'), ".log")) { // only calc .log file size, and not include .idx file int64_t file_size = 0; - taosStatFile(subdir, &file_size, NULL); + taosStatFile(subdir, &file_size, NULL, NULL); totalSize += file_size; } } @@ -702,4 +702,3 @@ int main(int32_t argc, char* argv[]) { taosCloseFile(&g_fp); return 0; } - From 9ddab11d98b19b46f346502fc0de03323438cc3a Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 7 Aug 2023 17:13:49 +0800 Subject: [PATCH 027/122] Revert "fix(tsdb/read2): reset stt reader when suspended" This reverts commit 079d7ff69ec525c69be84f30c4295662843cb547. 
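
The restored shape of mergeFileBlockAndLastBlock() dispatches on the
traversal direction first and funnels only the equal-timestamp case
(key == tsLast) into the shared row-merger path; a similar comparison
against tsLast is restored in doBuildDataBlock() to decide whether a
clean file block can be returned without decoding it. The sketch below
models just that dispatch. pickRowSource() and ERowSource are
illustrative stand-ins, not names from the source tree:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model of the restored comparison: key is the next
     * data-file block timestamp, tsLast the current stt/last-block
     * timestamp. */
    typedef enum { SRC_DATA_BLOCK, SRC_LAST_BLOCK, SRC_MERGE_BOTH } ERowSource;

    static ERowSource pickRowSource(bool asc, int64_t key, int64_t tsLast) {
      if (asc ? (key < tsLast) : (key > tsLast)) return SRC_DATA_BLOCK;
      if (asc ? (key > tsLast) : (key < tsLast)) return SRC_LAST_BLOCK;
      return SRC_MERGE_BOTH; /* key == tsLast: merge rows from both sources */
    }

    int main(void) {
      printf("%d\n", pickRowSource(true, 10, 20));  /* 0: data block row first */
      printf("%d\n", pickRowSource(false, 10, 20)); /* 1: last block row first */
      printf("%d\n", pickRowSource(true, 20, 20));  /* 2: equal keys, merge */
      return 0;
    }
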
--- source/dnode/vnode/src/tsdb/tsdbRead2.c | 133 +++++++++++++----------- 1 file changed, 70 insertions(+), 63 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index e1756333c5..d5d8ba130c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1729,41 +1729,45 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader // row in last file block TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); - int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader); - + int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader); if (ASCENDING_TRAVERSE(pReader->info.order)) { - if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist + if (key < tsLast) { return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); - } else if (key == ts) { - SRow* pTSRow = NULL; - int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader); - - TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree); - tsdbRowMergerAdd(pMerger, pRow1, NULL); - - doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, pMerger, &pReader->info.verRange, pReader->idStr); - - code = tsdbRowMergerGetRow(pMerger, &pTSRow); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo); - - taosMemoryFree(pTSRow); - tsdbRowMergerClear(pMerger); - return code; - } else { // key > ts + } else if (key > tsLast) { + return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); + } + } else { + if (key > tsLast) { + return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); + } else if (key < tsLast) { return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); } - } else { // desc order - return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true); } + // the following for key == tsLast + SRow* pTSRow = NULL; + int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader); + + TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree); + tsdbRowMergerAdd(pMerger, pRow1, NULL); + + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr); + + code = tsdbRowMergerGetRow(pMerger, &pTSRow); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo); + + taosMemoryFree(pTSRow); + tsdbRowMergerClear(pMerger); + return code; + } else { // only last block exists return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); } @@ -2190,7 +2194,8 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; TSDBROW *pRow = NULL, *piRow = NULL; - int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN; + int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : + (ASCENDING_TRAVERSE(pReader->info.order) ? 
INT64_MAX : INT64_MIN); if (pBlockScanInfo->iter.hasVal) { pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); } @@ -2564,9 +2569,8 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { // load the last data block of current table STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter; - if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) { - // reset the index in last block when handing a new file - // doCleanupTableScanInfo(pScanInfo); + if (pScanInfo == NULL) { + tsdbError("table Iter is null, invalid pScanInfo, try next table %s", pReader->idStr); bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; @@ -2575,8 +2579,15 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { continue; } - // reset the index in last block when handing a new file - // doCleanupTableScanInfo(pScanInfo); + if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) { + // reset the index in last block when handing a new file + bool hasNexTable = moveToNextTable(pUidList, pStatus); + if (!hasNexTable) { + return TSDB_CODE_SUCCESS; + } + + continue; + } bool hasDataInLastFile = initLastBlockReader(pLastBlockReader, pScanInfo, pReader); if (!hasDataInLastFile) { @@ -2667,16 +2678,32 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { (ASCENDING_TRAVERSE(pReader->info.order)) ? pBlockInfo->record.firstKey : pBlockInfo->record.lastKey; code = buildDataBlockFromBuf(pReader, pScanInfo, endKey); } else { - if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->info.order)) { - // only return the rows in last block - int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader); - ASSERT(tsLast >= pBlockInfo->record.lastKey); + bool bHasDataInLastBlock = hasDataInLastBlock(pLastBlockReader); + int64_t tsLast = bHasDataInLastBlock ? getCurrentKeyInLastBlock(pLastBlockReader) : INT64_MIN; + if (!bHasDataInLastBlock || ((ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.lastKey < tsLast) || + (!ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.firstKey > tsLast))) { + // whole block is required, return it directly + SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; + pInfo->rows = pBlockInfo->record.numRow; + pInfo->id.uid = pScanInfo->uid; + pInfo->dataLoad = 0; + pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey}; + setComposedBlockFlag(pReader, false); + setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order); + // update the last key for the corresponding table + pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? 
pInfo->window.ekey : pInfo->window.skey; + tsdbDebug("%p uid:%" PRIu64 + " clean file block retrieved from file, global index:%d, " + "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", + pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow, + pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr); + } else { SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; - tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr); + tsdbDebug("load data in last block firstly %s", pReader->idStr); int64_t st = taosGetTimestampUs(); @@ -2707,23 +2734,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, el, pReader->idStr); } - } else { // whole block is required, return it directly - SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; - pInfo->rows = pBlockInfo->record.numRow; - pInfo->id.uid = pScanInfo->uid; - pInfo->dataLoad = 0; - pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey}; - setComposedBlockFlag(pReader, false); - setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order); - - // update the last key for the corresponding table - pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey; - tsdbDebug("%p uid:%" PRIu64 - " clean file block retrieved from file, global index:%d, " - "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", - pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow, - pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr); } + } return (pReader->code != TSDB_CODE_SUCCESS) ? 
pReader->code : code; @@ -4097,11 +4109,6 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { tsdbDataFileReaderClose(&pReader->pFileReader); - int64_t loadBlocks = 0; - double elapse = 0; - pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse); - pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES); - // resetDataBlockScanInfo excluding lastKey STableBlockScanInfo** p = NULL; int32_t iter = 0; @@ -4172,7 +4179,7 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } } - tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); + tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false); pReader->pReadSnap = NULL; pReader->flag = READER_STATUS_SUSPEND; From 9e4da6c089b5db3f08716961983053476c207206 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:14:58 +0800 Subject: [PATCH 028/122] s3/config: parsing s3 configuration --- source/common/src/tglobal.c | 41 +++++++++++++++++++++++---- source/dnode/vnode/src/vnd/vnodeCos.c | 8 +++--- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1f6d0800a5..fbc98715f0 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -236,8 +236,9 @@ bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration char tsS3Endpoint[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3AccessKey[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeySecret[TSDB_FQDN_LEN] = ""; char tsS3BucketName[TSDB_FQDN_LEN] = ""; char tsS3AppId[TSDB_FQDN_LEN] = ""; int8_t tsS3Enabled = false; @@ -263,6 +264,35 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif +int32_t taosSetS3Cfg(SConfig *pCfg) { + tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + char *colon = strchr(tsS3AccessKey, ':'); + if (!colon) { + uError("invalid access key:%s", tsS3AccessKey); + return -1; + } + *colon = '\0'; + tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN); + tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN); + tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + char *cos = strstr(tsS3Endpoint, "cos."); + if (cos) { + char *appid = strrchr(tsS3BucketName, '-'); + if (!appid) { + uError("failed to locate appid in bucket:%s", tsS3BucketName); + return -1; + } else { + tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN); + } + } + if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { + tsS3Enabled = true; + } + + return 0; +} + struct SConfig *taosGetCfg() { return tsCfg; } @@ -582,6 +612,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; GRANT_CFG_ADD; @@ -972,8 +1004,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, 
"pqSortMemThreshold")->i32; - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); - GRANT_CFG_GET; return 0; } @@ -1298,8 +1328,6 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); - } else if (strcasecmp("s3BucketName", name) == 0) { - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { @@ -1498,6 +1526,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile if (taosSetServerCfg(tsCfg)) return -1; if (taosSetReleaseCfg(tsCfg)) return -1; if (taosSetTfsCfg(tsCfg) != 0) return -1; + if (taosSetS3Cfg(tsCfg) != 0) return -1; } taosSetSystemCfg(tsCfg); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index bac38f7c35..a40e046972 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -7,8 +7,8 @@ #include "cos_log.h" extern char tsS3Endpoint[]; -extern char tsS3AcessKeyId[]; -extern char tsS3AcessKeySecret[]; +extern char tsS3AccessKeyId[]; +extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; @@ -41,8 +41,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { cos_config_t *config = options->config; cos_str_set(&config->endpoint, tsS3Endpoint); - cos_str_set(&config->access_key_id, tsS3AcessKeyId); - cos_str_set(&config->access_key_secret, tsS3AcessKeySecret); + cos_str_set(&config->access_key_id, tsS3AccessKeyId); + cos_str_set(&config->access_key_secret, tsS3AccessKeySecret); cos_str_set(&config->appid, tsS3AppId); config->is_cname = is_cname; From ec1270f75ca0b6062a68e2ca0ac2a99a3728eaba Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 7 Aug 2023 17:17:12 +0800 Subject: [PATCH 029/122] fix: restore stt block/data block merge back --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index d5d8ba130c..c5b3560029 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -439,7 +439,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void return code; _end: - tsdbReaderClose(pReader); + tsdbReaderClose2(pReader); *ppReader = NULL; return code; } @@ -4108,7 +4108,10 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } tsdbDataFileReaderClose(&pReader->pFileReader); - + int64_t loadBlocks = 0; + double elapse = 0; + pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse); + pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES); // resetDataBlockScanInfo excluding lastKey STableBlockScanInfo** p = NULL; int32_t iter = 0; @@ -4179,7 +4182,7 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } } - tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false); + tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); pReader->pReadSnap = NULL; pReader->flag = READER_STATUS_SUSPEND; From 1290f529daf34cb3cfd6fa3cff92b43e94694a10 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 
17:17:43 +0800 Subject: [PATCH 030/122] cos/example: turn head object on --- contrib/test/cos/main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c index faaceee2e3..7e5e7c8c8b 100644 --- a/contrib/test/cos/main.c +++ b/contrib/test/cos/main.c @@ -530,7 +530,12 @@ void test_head_object() { s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); print_headers(resp_headers); if (cos_status_is_ok(s)) { - printf("head object succeeded\n"); + long size = 0; + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + printf("head object succeeded: %ld\n", size); } else { printf("head object failed\n"); } @@ -3045,7 +3050,7 @@ int main(int argc, char *argv[]) { // test_object(); // test_put_object_with_limit(); // test_get_object_with_limit(); - // test_head_object(); + test_head_object(); // test_gen_object_url(); // test_list_objects(); // test_list_directory(); From 6fd2ae138e7ab2d67d6212d654ab2b9e98218244 Mon Sep 17 00:00:00 2001 From: jiacy-jcy <714897623@qq.com> Date: Mon, 7 Aug 2023 17:32:20 +0800 Subject: [PATCH 031/122] fix: taosMktime on windows platform --- include/os/osTime.h | 2 ++ source/common/src/ttime.c | 40 ------------------------------ source/os/src/osTime.c | 51 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 40 deletions(-) diff --git a/include/os/osTime.h b/include/os/osTime.h index 51a285a139..87df3a2650 100644 --- a/include/os/osTime.h +++ b/include/os/osTime.h @@ -95,6 +95,8 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf); struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst); time_t taosTime(time_t *t); time_t taosMktime(struct tm *timep); +int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour, + const uint32_t min, const uint32_t sec, int64_t time_zone); #ifdef __cplusplus } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 7a5581efbe..e9313e0591 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -25,46 +25,6 @@ #include "tlog.h" -/* - * mktime64 - Converts date to seconds. - * Converts Gregorian date to seconds since 1970-01-01 00:00:00. - * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 - * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. - * - * [For the Julian calendar (which was used in Russia before 1917, - * Britain & colonies before 1752, anywhere else before 1582, - * and is still in use by some communities) leave out the - * -year/100+year/400 terms, and add 10.] - * - * This algorithm was first published by Gauss (I think). - * - * A leap second can be indicated by calling this function with sec as - * 60 (allowable under ISO 8601). The leap second is treated the same - * as the following second since they don't exist in UNIX time. - * - * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight - * tomorrow - (allowable under ISO 8601) is supported. 
- */ -static int64_t user_mktime64(const uint32_t year0, const uint32_t mon0, const uint32_t day, const uint32_t hour, - const uint32_t min, const uint32_t sec, int64_t time_zone) { - uint32_t mon = mon0, year = year0; - - /* 1..12 -> 11,12,1..10 */ - if (0 >= (int32_t)(mon -= 2)) { - mon += 12; /* Puts Feb last since it has leap day */ - year -= 1; - } - - // int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + - // year*365 - 719499)*24 + hour)*60 + min)*60 + sec); - int64_t res; - res = 367 * ((int64_t)mon) / 12; - res += year / 4 - year / 100 + year / 400 + day + ((int64_t)year) * 365 - 719499; - res = res * 24; - res = ((res + hour) * 60 + min) * 60 + sec; - - return (res + time_zone); -} // ==== mktime() kernel code =================// static int64_t m_deltaUtc = 0; diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 39d1de0437..0430dd70fc 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -367,8 +367,50 @@ int32_t taosGetTimeOfDay(struct timeval *tv) { time_t taosTime(time_t *t) { return time(t); } +/* + * mktime64 - Converts date to seconds. + * Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. + * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] + * + * This algorithm was first published by Gauss (I think). + * + * A leap second can be indicated by calling this function with sec as + * 60 (allowable under ISO 8601). The leap second is treated the same + * as the following second since they don't exist in UNIX time. + * + * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight + * tomorrow - (allowable under ISO 8601) is supported. 
+ */ +int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour, + const uint32_t min, const uint32_t sec, int64_t time_zone) { + uint32_t _mon = mon, _year = year; + + /* 1..12 -> 11,12,1..10 */ + if (0 >= (int32_t)(_mon -= 2)) { + _mon += 12; /* Puts Feb last since it has leap day */ + _year -= 1; + } + + // int64_t _res = (((((int64_t) (_year/4 - _year/100 + _year/400 + 367*_mon/12 + day) + + // _year*365 - 719499)*24 + hour)*60 + min)*60 + sec); + int64_t _res; + _res = 367 * ((int64_t)_mon) / 12; + _res += _year / 4 - _year / 100 + _year / 400 + day + ((int64_t)_year) * 365 - 719499; + _res = _res * 24; + _res = ((_res + hour) * 60 + min) * 60 + sec; + + return (_res + time_zone); +} + time_t taosMktime(struct tm *timep) { #ifdef WINDOWS +#if 0 struct tm tm1 = {0}; LARGE_INTEGER t; FILETIME f; @@ -405,6 +447,15 @@ time_t taosMktime(struct tm *timep) { t.QuadPart -= offset.QuadPart; return (time_t)(t.QuadPart / 10000000); +#else +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + int64_t tz = _timezone; +#endif +#endif + return user_mktime64(timep->tm_year + 1900, timep->tm_mon + 1, timep->tm_mday, timep->tm_hour, timep->tm_min, + timep->tm_sec, tz); +#endif #else return mktime(timep); #endif From 59100a7251fa358c43cded44a1bf054ffc485e02 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 7 Aug 2023 17:52:43 +0800 Subject: [PATCH 032/122] enh: code optimize for mktime --- source/os/src/osTime.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 0430dd70fc..e758055529 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -399,13 +399,12 @@ int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t da // int64_t _res = (((((int64_t) (_year/4 - _year/100 + _year/400 + 367*_mon/12 + day) + // _year*365 - 719499)*24 + hour)*60 + min)*60 + sec); - int64_t _res; - _res = 367 * ((int64_t)_mon) / 12; + int64_t _res = 367 * ((int64_t)_mon) / 12; _res += _year / 4 - _year / 100 + _year / 400 + day + ((int64_t)_year) * 365 - 719499; - _res = _res * 24; + _res *= 24; _res = ((_res + hour) * 60 + min) * 60 + sec; - return (_res + time_zone); + return _res + time_zone; } time_t taosMktime(struct tm *timep) { From b598c2651df65f5c91b5bfad301ce78d3777a096 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 7 Aug 2023 19:30:54 +0800 Subject: [PATCH 033/122] fix: use mktime after 19700101 on windows --- source/os/src/osTime.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index e758055529..05233065fa 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -447,6 +447,10 @@ time_t taosMktime(struct tm *timep) { t.QuadPart -= offset.QuadPart; return (time_t)(t.QuadPart / 10000000); #else + time_t result = mktime(timep); + if (result != -1) { + return result; + } #ifdef _MSC_VER #if _MSC_VER >= 1900 int64_t tz = _timezone; From 864d07e1bcf1168c4eb728fdb9ef3bb2f25f5ff2 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 7 Aug 2023 19:39:06 +0800 Subject: [PATCH 034/122] feat:[TD-24559]support geomety type in schemaless --- source/client/CMakeLists.txt | 4 +-- source/client/inc/clientSml.h | 1 + source/client/src/clientSml.c | 1 + source/client/src/clientSmlLine.c | 39 ++++++++++++++++++---- source/client/test/CMakeLists.txt | 2 +- source/dnode/mnode/impl/src/mndSubscribe.c | 2 +- source/libs/parser/src/parInsertSml.c | 14 ++++++-- source/libs/parser/src/parInsertUtil.c | 2 +- 8 files changed, 50 
insertions(+), 15 deletions(-) diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index d0cfd38fb9..e45ee9c932 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -16,7 +16,7 @@ target_include_directories( target_link_libraries( taos INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(TD_DARWIN_ARM64) @@ -57,7 +57,7 @@ target_include_directories( target_link_libraries( taos_static INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(${BUILD_TEST}) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 040064560c..11376052b6 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -33,6 +33,7 @@ extern "C" { #include "ttime.h" #include "ttypes.h" #include "cJSON.h" +#include "geosWrapper.h" #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) # define expect(expr,value) (__builtin_expect ((expr),(value)) ) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index ffff3df5d0..65175110cf 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1191,6 +1191,7 @@ void freeSSmlKv(void *data) { SSmlKv *kv = (SSmlKv *)data; if (kv->keyEscaped) taosMemoryFree((void *)(kv->key)); if (kv->valueEscaped) taosMemoryFree((void *)(kv->value)); + if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value)); } void smlDestroyInfo(SSmlHandle *info) { diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 1ee2cfbedf..9c0b2d7688 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -102,6 +102,30 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } + if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry + if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) { + int32_t code = initCtxGeomFromText(); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + char* tmp = taosMemoryCalloc(pVal->length, 1); + memcmp(tmp, pVal->value + 2, pVal->length - 3); + code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); + taosMemoryFree(tmp); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + pVal->type = TSDB_DATA_TYPE_GEOMETRY; + if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { + geosFreeBuffer((void*)(pVal->value)); + return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; + } + return TSDB_CODE_SUCCESS; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } + if (pVal->value[0] == 't' || pVal->value[0] == 'T') { if (pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') && @@ -390,7 +414,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type}; if (tag->type == TSDB_DATA_TYPE_NCHAR) { kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - } else if (tag->type == TSDB_DATA_TYPE_BINARY) { + } else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY) { kv.length = tag->bytes - 
VARSTR_HEADER_SIZE; } taosArrayPush((*tmp)->cols, &kv); @@ -663,14 +687,15 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine if (info->dataFormat) { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format true, ts:%" PRId64, info->id, ts); ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - ret = smlBuildRow(info->currTableDataCtx); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if (ret == TSDB_CODE_SUCCESS) { + ret = smlBuildRow(info->currTableDataCtx); } + clearColValArray(info->currTableDataCtx->pValues); + if (unlikely(ret != TSDB_CODE_SUCCESS)) { + smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); + return ret; + } } else { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format false, ts:%" PRId64, info->id, ts); taosArraySet(elements->colArray, 0, &kv); diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt index 34c377c6ea..91f0d1eef8 100644 --- a/source/client/test/CMakeLists.txt +++ b/source/client/test/CMakeLists.txt @@ -20,7 +20,7 @@ TARGET_LINK_LIBRARIES( ADD_EXECUTABLE(smlTest smlTest.cpp) TARGET_LINK_LIBRARIES( smlTest - PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom + PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 6f50b9ff9f..166e67b15b 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -489,7 +489,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); if(pVgEp->vgId == d1->vgId){ jump = true; - mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + mInfo("pSub->offsetRows jump, because consumer id:0x%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); break; } } diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 78b05b6df5..577b8961a7 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -22,7 +22,7 @@ static void clearColValArray(SArray* pCols) { int32_t num = taosArrayGetSize(pCols); for (int32_t i = 0; i < num; ++i) { SColVal* pCol = taosArrayGet(pCols, i); - if (TSDB_DATA_TYPE_NCHAR == pCol->type) { + if (TSDB_DATA_TYPE_NCHAR == pCol->type || TSDB_DATA_TYPE_GEOMETRY == pCol->type) { taosMemoryFreeClear(pCol->value.pData); } pCol->flag = CV_FLAG_NONE; @@ -237,9 +237,13 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32 } pVal->value.pData = pUcs4; pVal->value.nData = len; - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } @@ -364,9 +368,13 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc } pVal->value.pData = pUcs4; pVal->value.nData = len; - } else if (kv->type == 
TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index de7d154db6..33699ed857 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -333,7 +333,7 @@ int32_t insGetTableDataCxt(SHashObj* pHash, void* id, int32_t idLen, STableMeta* static void destroyColVal(void* p) { SColVal* pVal = p; - if (TSDB_DATA_TYPE_NCHAR == pVal->type) { + if (TSDB_DATA_TYPE_NCHAR == pVal->type || TSDB_DATA_TYPE_GEOMETRY == pVal->type) { taosMemoryFree(pVal->value.pData); } } From d0335bec974ee1516d74a6bafd196b0fea2859bd Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 09:49:31 +0800 Subject: [PATCH 035/122] feat:[TD-24559]support geomety type in schemaless --- source/client/src/clientSmlLine.c | 2 +- utils/test/c/sml_test.c | 33 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 9c0b2d7688..d6f405e69d 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -109,7 +109,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return code; } char* tmp = taosMemoryCalloc(pVal->length, 1); - memcmp(tmp, pVal->value + 2, pVal->length - 3); + memcpy(tmp, pVal->value + 2, pVal->length - 3); code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); taosMemoryFree(tmp); if (code != TSDB_CODE_SUCCESS) { diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index e4ed6037a3..237bfc5092 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -1552,12 +1552,45 @@ int sml_ts3724_Test() { return code; } +int sml_td24559_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "drop database if exists td24559"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists td24559"); + taos_free_result(pRes); + + const char *sql[] = { + "stb,t1=1 f1=283i32,f2=g\"Point(4.343 89.342)\" 1632299372000", + "stb,t1=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299373000", + "stb,t2=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299374000", + "stb,t1=1 f1=106i32,f2=G\"GEOMETRYCOLLECTION (MULTIPOINT((0 0), (1 1)), POINT(3 4), LINESTRING(2 3, 3 4))\" 1632299378000", + }; + + pRes = taos_query(taos, "use td24559"); + taos_free_result(pRes); + + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + + int code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + taos_free_result(pRes); + + taos_close(taos); + + return code; +} + int main(int argc, char *argv[]) { if (argc == 2) { taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); } int ret = 0; + ret = sml_td24559_Test(); + ASSERT(!ret); ret = sml_td24070_Test(); ASSERT(!ret); ret = sml_td23881_Test(); From 607132c18a4e00607a4fe64c2365a4e68c222d0e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 10:33:55 +0800 Subject: [PATCH 036/122] 
feat:[TD-24559]support geomety type in schemaless --- source/client/inc/clientSml.h | 2 +- source/client/src/clientSmlLine.c | 6 +++--- tests/system-test/2-query/sml.py | 5 +++++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 11376052b6..1839c14894 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -193,7 +193,7 @@ typedef struct { // SArray *preLineTagKV; SArray *maxTagKVs; - SArray *masColKVs; + SArray *maxColKVs; SSmlLineInfo preLine; STableMeta *currSTableMeta; diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index d6f405e69d..558c5f4ddb 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -421,7 +421,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin } } info->currSTableMeta = (*tmp)->tableMeta; - info->masColKVs = (*tmp)->cols; + info->maxColKVs = (*tmp)->cols; } } @@ -536,13 +536,13 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - if (cnt >= taosArrayGetSize(info->masColKVs)) { + if (cnt >= taosArrayGetSize(info->maxColKVs)) { info->dataFormat = false; info->reRun = true; freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->masColKVs, cnt); + SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->maxColKVs, cnt); if (kv.type != maxKV->type) { info->dataFormat = false; info->reRun = true; diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index b3aeb72194..cae012ece1 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -110,6 +110,11 @@ class TDTestCase: tdSql.query(f"select * from ts3724.`stb2.`") tdSql.checkRows(1) + + # tdSql.query(f"select * from td24559.stb order by _ts") + # tdSql.checkRows(4) + # tdSql.checkData(0, 2, "POINT (4.343000 89.342000)") + # tdSql.checkData(3, 2, "GEOMETRYCOLLECTION (MULTIPOINT ((0.000000 0.000000), (1.000000 1.000000)), POINT (3.000000 4.000000), LINESTRING (2.000000 3.000000, 3.000000 4.000000))") return def run(self): From 9b4bdd819bc4f9a2d2ceef8ac2b7928bc9590980 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 27 Jul 2023 09:07:13 +0800 Subject: [PATCH 037/122] enhance: subquery can use expr primary key +/- value as primary key --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..554dc7cce8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -821,7 +821,19 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { FUNCTION_TYPE_IROWTS == pFunc->funcType) { return true; } - } + } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; + } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } From d43db6e8a8250dbfd87de01e32ead4ccf299b7b7 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:17:18 +0800 Subject: [PATCH 038/122] fix: add test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_pk.sim | 42 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 
tests/script/tsim/query/join_pk.sim diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 586425ec1d..7a1e8d61c8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -954,6 +954,7 @@ ,,n,script,./test.sh -f tsim/query/udfpy.sim ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim +,,y,script,./test.sh -f tsim/query/join_pk.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim new file mode 100644 index 0000000000..66bb20da24 --- /dev/null +++ b/tests/script/tsim/query/join_pk.sim @@ -0,0 +1,42 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1) +sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql select * from (select _wstart - 1s as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +if $rows != 3 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi + +if $data21 != 1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi + +if $data13 != 1 then + return -1 +endi +if $data23 != 1 then + return -1 +endi +sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + From 67d4647fd56962da5e5131061d0d690316a5daf3 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 7 Aug 2023 13:39:30 +0800 Subject: [PATCH 039/122] enhance: enhance test case --- tests/script/tsim/query/join_pk.sim | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim index 66bb20da24..da5c13e9c0 100644 --- a/tests/script/tsim/query/join_pk.sim +++ b/tests/script/tsim/query/join_pk.sim @@ -38,5 +38,18 @@ sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval( sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +sql create table sst(ts timestamp, ts2 timestamp, f int) tags(t int); +sql insert into sct1 using sst tags(1) values('2023-08-07 13:30:56', '2023-08-07 13:30:56', 0)('2023-08-07 13:30:57', '2023-08-07 13:30:57', 1) +sql insert into sct2 using sst tags(2) values('2023-08-07 13:30:58', '2023-08-07 13:30:58', 2)('2023-08-07 13:30:59', '2023-08-07 
13:30:59', 3) +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts-1s as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 4 then + return -1 +endi +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 3 then + return -1 +endi +sql_error select * from (select ts2 - 1s as jts from sst) as t1 inner join (select ts2 as jts from sst) as t2 on t1.jts = t2.jts + #system sh/exec.sh -n dnode1 -s stop -x SIGINT From bea434623287ebaa7d3898675277e2f7ca3ac58c Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 8 Aug 2023 16:05:21 +0800 Subject: [PATCH 040/122] enh: remove timeline dependence of certain functions --- source/libs/function/src/builtins.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 6eb2be34b3..be94352f29 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2348,7 +2348,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "leastsquares", .type = FUNCTION_TYPE_LEASTSQUARES, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateLeastSQR, .getEnvFunc = getLeastSQRFuncEnv, .initFunc = leastSQRFunctionSetup, @@ -2914,8 +2914,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "tail", .type = FUNCTION_TYPE_TAIL, - .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, .translateFunc = translateTail, .getEnvFunc = getTailFuncEnv, .initFunc = tailFunctionSetup, @@ -2926,8 +2925,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "unique", .type = FUNCTION_TYPE_UNIQUE, - .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, .translateFunc = translateUnique, .getEnvFunc = getUniqueFuncEnv, .initFunc = uniqueFunctionSetup, From 2ed5ee835e8c6f10e782fe296f92c60ffa658fae Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 8 Aug 2023 16:08:59 +0800 Subject: [PATCH 041/122] fix docs --- docs/en/12-taos-sql/10-function.md | 2 +- docs/zh/12-taos-sql/10-function.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index afc1581c22..f3eb9516b6 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -698,7 +698,7 @@ ELAPSED(ts_primary_key [, time_unit]) LEASTSQUARES(expr, start_val, step_val) ``` -**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value. +**Description**: The linear regression function of a specified column, `start_val` is the initial value and `step_val` is the step value. 
**Return value type**: A string in the format of "(slope, intercept)" diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index fc0cfbe330..4a51358fa0 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -700,7 +700,7 @@ ELAPSED(ts_primary_key [, time_unit]) LEASTSQUARES(expr, start_val, step_val) ``` -**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 +**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 **返回数据类型**:字符串表达式(斜率, 截距)。 From d322cfce50b4be2c76219aa1e5d6c807b1694312 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 16:28:51 +0800 Subject: [PATCH 042/122] fix:add hashIteratorCancel for hash iterator --- source/client/src/clientSml.c | 5 +++++ source/dnode/vnode/src/sma/smaRollup.c | 1 + source/dnode/vnode/src/tq/tqRead.c | 1 + 3 files changed, 7 insertions(+) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 65175110cf..cad32842a0 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1073,6 +1073,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { return 0; end: + taosHashCancelIterate(info->superTables, tmp); taosHashCleanup(hashTmp); taosMemoryFreeClear(pTableMeta); catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); @@ -1434,6 +1435,7 @@ static int32_t smlInsertData(SSmlHandle *info) { code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); if(code != TSDB_CODE_SUCCESS){ taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } @@ -1442,6 +1444,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg)); @@ -1451,6 +1454,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) { uError("SML:0x%" PRIx64 " NULL == pMeta. 
table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -1466,6 +1470,7 @@ static int32_t smlInsertData(SSmlHandle *info) { taosMemoryFree(measure); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBindData failed", info->id); + taosHashCancelIterate(info->childTables, oneTable); return code; } oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 9fd4938448..2813f9059c 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -905,6 +905,7 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, int64_t version, void *pReq, void *pMsg, tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); if (tdExecuteRSmaAsync(pSma, version, pMsg, len, inputType, *pTbSuid) < 0) { smaError("vgId:%d, failed to process rsma submit exec 2 since: %s", SMA_VID(pSma), terrstr()); + taosHashCancelIterate(uidStore.uidHash, pIter); goto _err; } } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..046502b49f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -1095,6 +1095,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { if(ret != TDB_CODE_SUCCESS) { tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, pTqHandle->consumerId); taosArrayDestroy(list); + taosHashCancelIterate(pTq->pHandle, pIter); return ret; } tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL); From 0ce702f002da515a4d776c0231b0a55073b9bcac Mon Sep 17 00:00:00 2001 From: sunpeng Date: Tue, 8 Aug 2023 16:58:04 +0800 Subject: [PATCH 043/122] docs: fix connection param in taosws in python connector --- docs/en/14-reference/03-connector/07-python.mdx | 2 +- docs/zh/08-connector/30-python.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 831e79eeb7..5067c33e2d 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -373,7 +373,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat ```python -conn = taosws.connect(url="ws://localhost:6041") +conn = taosws.connect("taosws://localhost:6041") # Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. conn.execute("DROP DATABASE IF EXISTS test") conn.execute("CREATE DATABASE test") diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 15c11d05c3..ab98b5b8de 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -375,7 +375,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat ```python -conn = taosws.connect(url="ws://localhost:6041") +conn = taosws.connect("taosws://localhost:6041") # Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. 
conn.execute("DROP DATABASE IF EXISTS test") conn.execute("CREATE DATABASE test") From f7b393562803b87298d1ab00f302502f7c8a5caa Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 8 Aug 2023 17:08:30 +0800 Subject: [PATCH 044/122] udpate user_privilege_show test case and add it to cases.task by charles --- tests/parallel_test/cases.task | 3 + .../0-others/user_privilege_all.py | 62 ++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 986d36e177..0e4894d741 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -155,6 +155,9 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_show.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_multi_users.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py diff --git a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py index 2e796882c8..846b76317e 100644 --- a/tests/system-test/0-others/user_privilege_all.py +++ b/tests/system-test/0-others/user_privilege_all.py @@ -258,6 +258,66 @@ class TDTestCase: "insert into tb values(now, 20.0, 20);", "select * from tb;"], "res": [True, True, True, True, False, True, False] + }, + "test_db_all_childtable_none": { + "db_privilege": "all", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 20.2, 20)", + "insert into ct1 using stb tags('ct1') values(now, 21.21, 21)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 22.22, 22);", + "select * from tb;"], + "res": [True, True, True, True, True, True, True] + }, + "test_db_none_stable_all_childtable_none": { + "db_privilege": "none", + "stable_priviege": "all", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 23.23, 23)", + "insert into ct1 using stb tags('ct1') values(now, 24.24, 24)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 25.25, 25);", + "select * from tb;"], + "res": [True, True, True, True, True, False, False] + }, + "test_db_no_permission_childtable_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "all", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 28.28, 28);", + "select * from tb;"], + "res": [False, True, True, True, False, False, False] + }, + "test_db_none_stable_none_table_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + 
"child_table_ct2_privilege": "none", + "table_tb_privilege": "all", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 29.29, 29);", + "select * from tb;"], + "res": [False, False, False, False, False, True, True] } } @@ -361,7 +421,7 @@ class TDTestCase: data = res.fetch_all() tdLog.debug("query result: {}".format(data)) # check query results by cases - if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read"] and self.cases[case_name]["sql"][index] == "select * from ct2;": + if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read", "test_db_no_permission_childtable_all"] and self.cases[case_name]["sql"][index] == "select * from ct2;": if not self.cases[case_name]["res"][index]: if 0 == len(data): tdLog.debug("Query with sql {} successfully as expected with empty result".format(self.cases[case_name]["sql"][index])) From e4d16e594cd1f78e3a8938aba13d1b9b787a9947 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 8 Aug 2023 17:46:37 +0800 Subject: [PATCH 045/122] enh: check if disk space sufficient at primary dir with tfs --- include/libs/tfs/tfs.h | 19 ++++++ source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 16 +----- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 12 +++- source/dnode/mgmt/node_mgmt/inc/dmMgmt.h | 4 +- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 64 +++++++++++++++------ source/dnode/mgmt/node_util/inc/dmUtil.h | 2 + source/libs/tfs/src/tfs.c | 34 +++++++++++ source/os/src/osEnv.c | 4 +- 8 files changed, 118 insertions(+), 37 deletions(-) diff --git a/include/libs/tfs/tfs.h b/include/libs/tfs/tfs.h index 509f8dc9e8..2b90e3226c 100644 --- a/include/libs/tfs/tfs.h +++ b/include/libs/tfs/tfs.h @@ -300,6 +300,25 @@ void tfsClosedir(STfsDir *pDir); */ int32_t tfsGetMonitorInfo(STfs *pTfs, SMonDiskInfo *pInfo); +/** + * @brief Check if disk space available at level + * + * @param pTfs The fs object. + * #param level the level + * @return bool + */ +bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level); + +/** + * @brief Check if disk space sufficient at disk of level + * + * @param pTfs The fs object. 
+ * @param level the level + * @param disk the disk + * @return bool + */ +bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 94a753062c..0ff2537e4c 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -460,7 +460,6 @@ static void vmCleanup(SVnodeMgmt *pMgmt) { vmCloseVnodes(pMgmt); vmStopWorker(pMgmt); vnodeCleanup(); - tfsClose(pMgmt->pTfs); taosThreadRwlockDestroy(&pMgmt->lock); taosMemoryFree(pMgmt); } @@ -535,20 +534,9 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->msgCb.mgmt = pMgmt; taosThreadRwlockInit(&pMgmt->lock, NULL); - SDiskCfg dCfg = {0}; - tstrncpy(dCfg.dir, tsDataDir, TSDB_FILENAME_LEN); - dCfg.level = 0; - dCfg.primary = 1; - SDiskCfg *pDisks = tsDiskCfg; - int32_t numOfDisks = tsDiskCfgNum; - if (numOfDisks <= 0 || pDisks == NULL) { - pDisks = &dCfg; - numOfDisks = 1; - } - - pMgmt->pTfs = tfsOpen(pDisks, numOfDisks); + pMgmt->pTfs = pInput->pTfs; if (pMgmt->pTfs == NULL) { - dError("failed to init tfs since %s", terrstr()); + dError("tfs is null."); goto _OVER; } tmsgReportStartup("vnode-tfs", "initialized"); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 247c1729a3..d567f1128e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "vmInt.h" +#include "vnodeInt.h" static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) { if (pMsg->info.handle == NULL) return; @@ -158,6 +159,15 @@ static void vmSendResponse(SRpcMsg *pMsg) { } } +static bool vmDataSpaceSufficient(SVnodeObj *pVnode) { + STfs *pTfs = pVnode->pImpl->pTfs; + if (pTfs) { + return tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary); + } else { + return osDataSpaceSufficient(); + } +} + static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) { const STraceId *trace = &pMsg->info.traceId; if (pMsg->contLen < sizeof(SMsgHead)) { @@ -203,7 +213,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp taosWriteQitem(pVnode->pFetchQ, pMsg); break; case WRITE_QUEUE: - if (!osDataSpaceSufficient()) { + if (!vmDataSpaceSufficient(pVnode)) { terrno = TSDB_CODE_NO_ENOUGH_DISKSPACE; code = terrno; dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code)); diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h index 02cd678433..98489433b9 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h @@ -20,6 +20,7 @@ #include "uv.h" #include "dmInt.h" +#include "tfs.h" #ifdef __cplusplus extern "C" { @@ -79,6 +80,7 @@ typedef struct SDnode { TdThreadMutex mutex; TdFilePtr lockfile; SDnodeData data; + STfs *pTfs; SMgmtWrapper wrappers[NODE_END]; } SDnode; @@ -124,4 +126,4 @@ void dmGetQnodeLoads(SQnodeLoad *pInfo); } #endif -#endif /*_TD_DND_MGMT_H_*/ \ No newline at end of file +#endif /*_TD_DND_MGMT_H_*/ diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 3f9c5bbeaf..a34002161d 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -96,28 +96,23 @@ _exit: return code; } -static bool dmCheckDiskSpace() { - osUpdate(); - // sufficiency - if 
(!osDataSpaceSufficient()) { - dWarn("free data disk size: %f GB, not sufficient, expected %f GB at least", - (double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0, - (double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0); +static bool dmDataSpaceAvailable() { + SDnode *pDnode = dmInstance(); + if (pDnode->pTfs) { + return tfsDiskSpaceAvailable(pDnode->pTfs, 0); } - if (!osLogSpaceSufficient()) { - dWarn("free log disk size: %f GB, not sufficient, expected %f GB at least", - (double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0, - (double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0); - } - if (!osTempSpaceSufficient()) { - dWarn("free temp disk size: %f GB, not sufficient, expected %f GB at least", - (double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0, - (double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0); - } - // availability - bool ret = true; if (!osDataSpaceAvailable()) { dError("data disk space unavailable, i.e. %s", tsDataDir); + return false; + } + return true; +} + +static bool dmCheckDiskSpace() { + osUpdate(); + // availability + bool ret = true; + if (!dmDataSpaceAvailable()) { terrno = TSDB_CODE_NO_DISKSPACE; ret = false; } @@ -134,6 +129,34 @@ static bool dmCheckDiskSpace() { return ret; } +int32_t dmDiskInit() { + SDnode *pDnode = dmInstance(); + SDiskCfg dCfg = {0}; + tstrncpy(dCfg.dir, tsDataDir, TSDB_FILENAME_LEN); + dCfg.level = 0; + dCfg.primary = 1; + SDiskCfg *pDisks = tsDiskCfg; + int32_t numOfDisks = tsDiskCfgNum; + if (numOfDisks <= 0 || pDisks == NULL) { + pDisks = &dCfg; + numOfDisks = 1; + } + + pDnode->pTfs = tfsOpen(pDisks, numOfDisks); + if (pDnode->pTfs == NULL) { + dError("failed to init tfs since %s", terrstr()); + return -1; + } + return 0; +} + +int32_t dmDiskClose() { + SDnode *pDnode = dmInstance(); + tfsClose(pDnode->pTfs); + pDnode->pTfs = NULL; + return 0; +} + static bool dmCheckDataDirVersion() { char checkDataDirJsonFileName[PATH_MAX] = {0}; snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir); @@ -147,6 +170,7 @@ static bool dmCheckDataDirVersion() { int32_t dmInit() { dInfo("start to init dnode env"); + if (dmDiskInit() != 0) return -1; if (!dmCheckDataDirVersion()) return -1; if (!dmCheckDiskSpace()) return -1; if (dmCheckRepeatInit(dmInstance()) != 0) return -1; @@ -177,6 +201,7 @@ void dmCleanup() { udfcClose(); udfStopUdfd(); taosStopCacheRefreshWorker(); + dmDiskClose(); dInfo("dnode env is cleaned up"); taosCleanupCfg(); @@ -367,6 +392,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) { SMgmtInputOpt opt = { .path = pWrapper->path, .name = pWrapper->name, + .pTfs = pWrapper->pDnode->pTfs, .pData = &pWrapper->pDnode->data, .processCreateNodeFp = dmProcessCreateNodeReq, .processAlterNodeTypeFp = dmProcessAlterNodeTypeReq, diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 85057e5916..32c3d22506 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -37,6 +37,7 @@ #include "monitor.h" #include "qnode.h" #include "sync.h" +#include "tfs.h" #include "wal.h" #include "libs/function/tudf.h" @@ -111,6 +112,7 @@ typedef struct { typedef struct { const char *path; const char *name; + STfs *pTfs; SDnodeData *pData; SMsgCb msgCb; ProcessCreateNodeFp processCreateNodeFp; diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 8adaab91a1..445c24159f 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -14,6 +14,7 @@ */ #define _DEFAULT_SOURCE +#include 
"osEnv.h" #include "tfsInt.h" static int32_t tfsMount(STfs *pTfs, SDiskCfg *pCfg); @@ -113,6 +114,39 @@ SDiskSize tfsGetSize(STfs *pTfs) { return size; } +bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level) { + if (level < 0 || level >= pTfs->nlevel) { + return false; + } + STfsTier *pTier = TFS_TIER_AT(pTfs, level); + for (int32_t id = 0; id < pTier->ndisk; id++) { + SDiskID diskId = {.level = level, .id = id}; + STfsDisk *pDisk = TFS_DISK_AT(pTfs, diskId); + if (pDisk == NULL) { + return false; + } + if (pDisk->size.avail <= 0) { + fError("tfs disk space unavailable. level:%d, disk:%d, path:%s", level, id, pDisk->path); + return false; + } + } + return true; +} + +bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk) { + if (level < 0 || level >= pTfs->nlevel) { + return false; + } + + STfsTier *pTier = TFS_TIER_AT(pTfs, level); + if (disk < 0 || disk >= pTier->ndisk) { + return false; + } + SDiskID diskId = {.level = level, .id = disk}; + STfsDisk *pDisk = TFS_DISK_AT(pTfs, diskId); + return pDisk->size.avail >= tsDataSpace.reserved; +} + int32_t tfsGetDisksAtLevel(STfs *pTfs, int32_t level) { if (level < 0 || level >= pTfs->nlevel) { return 0; diff --git a/source/os/src/osEnv.c b/source/os/src/osEnv.c index 7f0e6d1dee..0fc136c693 100644 --- a/source/os/src/osEnv.c +++ b/source/os/src/osEnv.c @@ -95,10 +95,10 @@ void osCleanup() {} bool osLogSpaceAvailable() { return tsLogSpace.size.avail > 0; } -bool osDataSpaceAvailable() { return tsDataSpace.size.avail > 0; } - bool osTempSpaceAvailable() { return tsTempSpace.size.avail > 0; } +bool osDataSpaceAvailable() { return tsDataSpace.size.avail > 0; } + bool osLogSpaceSufficient() { return tsLogSpace.size.avail > tsLogSpace.reserved; } bool osDataSpaceSufficient() { return tsDataSpace.size.avail > tsDataSpace.reserved; } From 2c65ffc33b691e7ba9dad89f33243cdd662271f0 Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 8 Aug 2023 17:08:30 +0800 Subject: [PATCH 046/122] remove the case user_privilege_multi_users.py from cases.task --- tests/parallel_test/cases.task | 2 + .../0-others/user_privilege_all.py | 62 ++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 986d36e177..c9204e9ce8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -155,6 +155,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_show.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py diff --git a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py index 2e796882c8..846b76317e 100644 --- a/tests/system-test/0-others/user_privilege_all.py +++ b/tests/system-test/0-others/user_privilege_all.py @@ -258,6 +258,66 @@ class TDTestCase: "insert into tb values(now, 20.0, 20);", "select * from tb;"], "res": [True, True, True, True, False, True, False] + }, + "test_db_all_childtable_none": { + "db_privilege": "all", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": 
"none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 20.2, 20)", + "insert into ct1 using stb tags('ct1') values(now, 21.21, 21)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 22.22, 22);", + "select * from tb;"], + "res": [True, True, True, True, True, True, True] + }, + "test_db_none_stable_all_childtable_none": { + "db_privilege": "none", + "stable_priviege": "all", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 23.23, 23)", + "insert into ct1 using stb tags('ct1') values(now, 24.24, 24)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 25.25, 25);", + "select * from tb;"], + "res": [True, True, True, True, True, False, False] + }, + "test_db_no_permission_childtable_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "all", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 28.28, 28);", + "select * from tb;"], + "res": [False, True, True, True, False, False, False] + }, + "test_db_none_stable_none_table_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "all", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 29.29, 29);", + "select * from tb;"], + "res": [False, False, False, False, False, True, True] } } @@ -361,7 +421,7 @@ class TDTestCase: data = res.fetch_all() tdLog.debug("query result: {}".format(data)) # check query results by cases - if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read"] and self.cases[case_name]["sql"][index] == "select * from ct2;": + if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read", "test_db_no_permission_childtable_all"] and self.cases[case_name]["sql"][index] == "select * from ct2;": if not self.cases[case_name]["res"][index]: if 0 == len(data): tdLog.debug("Query with sql {} successfully as expected with empty result".format(self.cases[case_name]["sql"][index])) From f02fd54298b7fb39f8f6c18d4c97dbcd4eb79f39 Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 1 Aug 2023 17:32:06 +0800 Subject: [PATCH 047/122] add test case to cases.task by charles --- tests/parallel_test/cases.task | 1 + .../0-others/user_privilege_multi_users.py | 126 ++++++++++++++++++ 2 files changed, 127 insertions(+) create mode 100644 tests/system-test/0-others/user_privilege_multi_users.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 89572d1c06..d8f178dcfa 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -154,6 +154,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py 
+,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_multi_users.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py diff --git a/tests/system-test/0-others/user_privilege_multi_users.py b/tests/system-test/0-others/user_privilege_multi_users.py new file mode 100644 index 0000000000..8812f42e7b --- /dev/null +++ b/tests/system-test/0-others/user_privilege_multi_users.py @@ -0,0 +1,126 @@ +from itertools import product +import taos +import random +from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + # init the tdsql + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + # user info + self.userNum = 100 + self.basic_username = "user" + self.password = "pwd" + + # db info + self.dbname = "user_privilege_multi_users" + self.stbname = 'stb' + self.ctbname_num = 100 + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'float', + 'col2': 'int', + } + self.tag_dict = { + 'ctbname': 'binary(10)' + } + + self.privilege_list = [] + + def prepare_data(self): + """Create the db and data for test + """ + # create datebase + tdSql.execute(f"create database {self.dbname}") + tdLog.debug("sql:" + f"create database {self.dbname}") + tdSql.execute(f"use {self.dbname}") + tdLog.debug("sql:" + f"use {self.dbname}") + + # create super table + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) + tdLog.debug("Create stable {} successfully".format(self.stbname)) + for ctbIndex in range(self.ctbname_num): + ctname = f"ctb{ctbIndex}" + tdSql.execute(f"create table {ctname} using {self.stbname} tags('{ctname}')") + tdLog.debug("sql:" + f"create table {ctname} using {self.stbname} tags('{ctname}')") + + def create_multiusers(self): + """Create the user for test + """ + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + tdSql.execute(f'create user {username} pass "{self.password}"') + tdLog.debug("sql:" + f'create user {username} pass "{self.password}"') + + def grant_privilege(self): + """Add the privilege for the users + """ + try: + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + privilege = random.choice(["read", "write", "all"]) + condition = f"ctbname='ctb{userIndex}'" + self.privilege_list.append({ + "username": username, + "privilege": privilege, + "condition": condition + }) + tdSql.execute(f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}') + tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}') + except Exception as ex: + tdLog.exit(ex) + + def remove_privilege(self): + """Remove the privilege for the users + """ + try: + for item in self.privilege_list: + username = item["username"] + privilege = item["privilege"] + condition = item["condition"] + tdSql.execute(f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}') + tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}') + except Exception as ex: + tdLog.exit(ex) + + def run(self): + """ + Check the information from 
information_schema.ins_user_privileges + """ + self.create_multiusers() + self.prepare_data() + # grant privilege to users + self.grant_privilege() + # check information_schema.ins_user_privileges + tdSql.query("select * from information_schema.ins_user_privileges;") + tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(tdSql.queryResult)) + if len(tdSql.queryResult) >= self.userNum: + tdLog.debug("case passed") + else: + tdLog.exit("The privilege number in information_schema.ins_user_privileges is incorrect") + + def stop(self): + # remove the privilege + self.remove_privilege() + # clear env + tdSql.execute(f"drop database {self.dbname}") + # remove the users + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + tdSql.execute(f'drop user {username}') + # close the connection + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 16e015253b2c3f01981914f34743794e1d203e94 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:10:25 +0800 Subject: [PATCH 048/122] s3/mxml: remove os external dependency --- contrib/CMakeLists.txt | 31 ++++++++++++++++++++++++++++--- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 2 +- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index df9519d00f..db4d359938 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,6 +124,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -157,6 +160,21 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") +# include(ExternalProject) +# ExternalProject_Add(mxml +# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git +# GIT_TAG release-2.10 +# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" +# #BINARY_DIR "" +# BUILD_IN_SOURCE TRUE +# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build +# BUILD_COMMAND make +# INSTALL_COMMAND make install +# TEST_COMMAND "" +# ) + + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . 
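The cat() calls added in the hunk above hook mxml into TDengine's standard dependency bootstrap: each selected *_CMakeLists.txt.in fragment is appended to one temporary file, which the trailing context lines then configure into deps-download/CMakeLists.txt and build at configure time. For reference, the helper is assumed to be the usual append macro defined near the top of contrib/CMakeLists.txt, outside this diff:

# Assumed definition of the cat() helper used by the fragment list above;
# it simply appends one fragment onto the aggregated download script.
macro(cat IN_FILE OUT_FILE)
  file(READ ${IN_FILE} CONTENTS)
  file(APPEND ${OUT_FILE} "${CONTENTS}")
endmacro(cat)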
@@ -355,7 +373,10 @@ endif()

 # cos
 if(${BUILD_WITH_COS})
+  if(NOT ${TD_WINDOWS})
+    #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
     option(ENABLE_TEST "Enable the tests" OFF)
+    INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
     set(CMAKE_BUILD_TYPE debug)

     set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
@@ -363,11 +384,15 @@ if(${BUILD_WITH_COS})
     add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)

     target_include_directories(
-    cos_c_sdk
-    PUBLIC $
-  )
+      cos_c_sdk
+      PUBLIC $
+      )

     set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
+
+  else()
+
+  endif(NOT ${TD_WINDOWS})
 endif(${BUILD_WITH_COS})

 # lucene
diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt
index 77c57e5a65..3eb484c2c5 100644
--- a/contrib/test/cos/CMakeLists.txt
+++ b/contrib/test/cos/CMakeLists.txt
@@ -39,11 +39,11 @@ target_include_directories(
 find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
 find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
-find_library(MINIXML_LIBRARY mxml)
+#find_library(MINIXML_LIBRARY mxml)
 find_library(CURL_LIBRARY curl)

 target_link_libraries(cosTest cos_c_sdk)
 target_link_libraries(cosTest ${APR_UTIL_LIBRARY})
 target_link_libraries(cosTest ${APR_LIBRARY})
-target_link_libraries(cosTest ${MINIXML_LIBRARY})
+target_link_libraries(cosTest mxml)
 target_link_libraries(cosTest ${CURL_LIBRARY})
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index 0612f924f5..eea81ea3d2 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -137,7 +137,7 @@ endif()

 find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
 find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
-find_library(MINIXML_LIBRARY mxml)
+#find_library(MINIXML_LIBRARY mxml)
 find_library(CURL_LIBRARY curl)

 target_link_libraries(

From c4f7b5d530a58026b1b1b7b64b535e1e12887ecc Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Wed, 9 Aug 2023 10:23:00 +0800
Subject: [PATCH 049/122] mxml: makefile for mxml

---
 cmake/mxml_CMakeLists.txt.in | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 cmake/mxml_CMakeLists.txt.in

diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in
new file mode 100644
index 0000000000..994aa6e2cb
--- /dev/null
+++ b/cmake/mxml_CMakeLists.txt.in
@@ -0,0 +1,12 @@
+# cos
+ExternalProject_Add(mxml
+    GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
+    GIT_TAG release-2.10
+    SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
+    #BINARY_DIR ""
+    BUILD_IN_SOURCE TRUE
+    CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
+    BUILD_COMMAND make
+    INSTALL_COMMAND make install
+    TEST_COMMAND ""
+)

From 4d1155a5cfe6a329a4bb728e1c4aa3c3e3600a77 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Wed, 9 Aug 2023 10:43:27 +0800
Subject: [PATCH 050/122] curl: makefile for curl

---
 cmake/curl_CMakeLists.txt.in      | 12 ++++++++++++
 contrib/test/cos/CMakeLists.txt   |  4 ++--
 source/dnode/vnode/CMakeLists.txt |  6 +++---
 3 files changed, 17 insertions(+), 5 deletions(-)
 create mode 100644 cmake/curl_CMakeLists.txt.in

diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in
new file mode 100644
index 0000000000..a23c5e7bab
--- /dev/null
+++ b/cmake/curl_CMakeLists.txt.in
@@ -0,0 +1,12 @@
+# curl
+ExternalProject_Add(curl
+    GIT_REPOSITORY https://github.com/curl/curl.git
+    GIT_TAG curl-7_88_1
+    SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
+    BINARY_DIR ""
+    #BUILD_IN_SOURCE TRUE
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND ""
+    INSTALL_COMMAND ""
+    TEST_COMMAND ""
+)
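With mxml now produced by ExternalProject_Add and installed under ${CMAKE_BINARY_DIR}/build (curl is still an empty stub at this point), consumers can stop resolving the libraries through find_library() and link the project names directly, which is what the following hunks do for cosTest and vnode. A hedged consumer sketch under those assumptions; cosDemo and demo.c are illustrative names, not from the patch:

# Sketch: link the contrib-built libraries instead of system copies, assuming
# the ${CMAKE_BINARY_DIR}/build install prefix used by the mxml fragment above.
add_executable(cosDemo demo.c)
target_include_directories(cosDemo PUBLIC ${CMAKE_BINARY_DIR}/build/include)
target_link_directories(cosDemo PUBLIC ${CMAKE_BINARY_DIR}/build/lib)
target_link_libraries(cosDemo cos_c_sdk mxml curl)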
diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 3eb484c2c5..38de8a25e8 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -40,10 +40,10 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) +#find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) target_link_libraries(cosTest mxml) -target_link_libraries(cosTest ${CURL_LIBRARY}) +target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index eea81ea3d2..562207268c 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -138,7 +138,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) +#find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -164,8 +164,8 @@ target_link_libraries( cos_c_sdk ${APR_UTIL_LIBRARY} ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} + mxml + curl ) IF (TD_GRANT) From 7114ad63e5ac8846a11e221079031ee22bccc4f1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:54:17 +0800 Subject: [PATCH 051/122] =?UTF-8?q?apr=EF=BC=9Amakefile=20for=20apr=20&=20?= =?UTF-8?q?apr-util?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cmake/apr-util_CMakeLists.txt.in | 12 ++++++++++++ cmake/apr_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 8 ++++---- source/dnode/vnode/CMakeLists.txt | 8 ++++---- 4 files changed, 32 insertions(+), 8 deletions(-) create mode 100644 cmake/apr-util_CMakeLists.txt.in create mode 100644 cmake/apr_CMakeLists.txt.in diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in new file mode 100644 index 0000000000..8471a05db6 --- /dev/null +++ b/cmake/apr-util_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr-util +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr-util.git + GIT_TAG 1.5.4 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in new file mode 100644 index 0000000000..68b6f39c89 --- /dev/null +++ b/cmake/apr_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr.git + GIT_TAG 1.5.2 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 38de8a25e8..2d2e101877 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -37,13 +37,13 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" ) -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) 
#find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) -target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest apr}) +target_link_libraries(cosTest apr-util}) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 562207268c..cf7d205c00 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,8 +135,8 @@ else() endif() endif() -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) #find_library(CURL_LIBRARY curl) @@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} + apr + apr-util mxml curl ) From 2e0519b9609d98a119b8908199c073dd8cb07d9f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:01:42 +0800 Subject: [PATCH 052/122] apr: fix apr & apr-util project names --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 8471a05db6..c4dd943243 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr-util -ExternalProject_Add(apr +ExternalProject_Add(aprutil-1 GIT_REPOSITORY https://github.com/apache/apr-util.git GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 68b6f39c89..bfbe8196d3 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr -ExternalProject_Add(apr +ExternalProject_Add(apr-1 GIT_REPOSITORY https://github.com/apache/apr.git GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 2d2e101877..f8804033de 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -43,7 +43,7 @@ target_include_directories( #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest apr}) -target_link_libraries(cosTest apr-util}) +target_link_libraries(cosTest apr-1}) +target_link_libraries(cosTest aprutil-1}) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index cf7d205c00..a219990690 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - apr - apr-util + apr-1 + aprutil-1 mxml curl ) From 57ba106371bbbcbf83b14ce8668b798aae861bbe Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:04:53 +0800 Subject: [PATCH 053/122] cos: move cmake prefix path after external building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index db4d359938..0ef799e9da 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,7 +125,6 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY 
${CMAKE_BINARY_DIR}/build/) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) @@ -374,6 +373,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 398567ef4ca524cd74ee3f203ba6512cde7f523d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:14:00 +0800 Subject: [PATCH 054/122] contrib/cmake: add apr apr-util, and curl into makefile --- contrib/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 0ef799e9da..053266c533 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,6 +125,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) From 8b84e0544c44bb8f2504e79b26f1dbcb63b4e7c5 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 9 Aug 2023 11:40:23 +0800 Subject: [PATCH 055/122] add test cases for leastsquares --- tests/system-test/2-query/leastsquares.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/system-test/2-query/leastsquares.py b/tests/system-test/2-query/leastsquares.py index 1718802a82..f433e702ec 100644 --- a/tests/system-test/2-query/leastsquares.py +++ b/tests/system-test/2-query/leastsquares.py @@ -219,7 +219,7 @@ class TDTestCase: tdSql.checkRows(1) res = tdSql.getData(0, 0) if res is None: - tdLog.exit("result is not correct") + tdLog.exit("result is not correct") def __test_current(self): # tdSql.query("explain select c1 from {dbname}.ct1") @@ -228,6 +228,11 @@ class TDTestCase: # tdSql.query("explain select count(c3) from {dbname}.ct4 group by c7 having count(c3) > 0") # tdSql.query("explain select ct2.c3 from {dbname}.ct4 join ct2 on ct4.ts=ct2.ts") # tdSql.query("explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ") + # + tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 group by c1)") + tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 partition by c1)") + tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 order by c1)") + tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 union select c1 from nt1)") self.leastsquares_check() From 7e2859ed43e8ebf375a5808b0d35c3c191a725b6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:03:37 +0800 Subject: [PATCH 056/122] apr: use tarball to avoid ./buildconf --- cmake/apr-util_CMakeLists.txt.in | 18 +++++++++++------- cmake/apr_CMakeLists.txt.in | 21 ++++++++++++++------- cmake/curl_CMakeLists.txt.in | 9 ++++----- contrib/CMakeLists.txt | 2 +- source/dnode/vnode/CMakeLists.txt | 16 ++++++++-------- 5 files changed, 38 insertions(+), 28 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c4dd943243..b81745aeef 
100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,12 +1,16 @@ # apr-util ExternalProject_Add(aprutil-1 - GIT_REPOSITORY https://github.com/apache/apr-util.git - GIT_TAG 1.5.4 + URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz + URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr-util.git + #GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bfbe8196d3..037c2ee6cc 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,12 +1,19 @@ # apr ExternalProject_Add(apr-1 - GIT_REPOSITORY https://github.com/apache/apr.git - GIT_TAG 1.5.2 + URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz + URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr.git + #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "${CMAKE_BINARY_DIR}/build" + BUILD_IN_SOURCE TRUE + #CONFIGURE_COMMAND "" + #BUILD_COMMAND "" + #INSTALL_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index a23c5e7bab..cec4dda004 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -3,10 +3,9 @@ ExternalProject_Add(curl GIT_REPOSITORY https://github.com/curl/curl.git GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 053266c533..058b2cf042 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,10 +125,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a219990690..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,10 +135,10 @@ else() endif() endif() -#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) 
-#find_library(MINIXML_LIBRARY mxml) -#find_library(CURL_LIBRARY curl) +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -162,10 +162,10 @@ target_link_libraries( # s3 cos_c_sdk - apr-1 - aprutil-1 - mxml - curl + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) From 56b348abf2e821fd58834566ff878ffc988bbf34 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:15:25 +0800 Subject: [PATCH 057/122] curl: use tarball --- cmake/curl_CMakeLists.txt.in | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cec4dda004..cbfe939219 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -1,7 +1,10 @@ # curl ExternalProject_Add(curl - GIT_REPOSITORY https://github.com/curl/curl.git - GIT_TAG curl-7_88_1 + URL https://curl.se/download/curl-8.2.1.tar.gz + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/curl/curl.git + #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ From 376a2c2520dc3c5887bbb10f17a143ba9aeec407 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:31:18 +0800 Subject: [PATCH 058/122] curl: with openssl building --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cbfe939219..e411cd893c 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,7 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From b08d5b4d42de9ae814c695478d7eee9c28616573 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:02:10 +0800 Subject: [PATCH 059/122] mxml: add include dir to vnode --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..6c107e0a22 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,6 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + PUBLIC "${CMAKE_BINARY_DIR}/build/include" ) if(${BUILD_TEST}) From 6ab16e89d41115c7b186b10f0ed29afff04b5beb Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 9 Aug 2023 14:27:41 +0800 Subject: [PATCH 060/122] add tail, unique cases --- tests/system-test/2-query/tail.py | 12 ++++++++++++ tests/system-test/2-query/unique.py | 9 +++++++++ 2 files changed, 21 insertions(+) diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py index 43321810d0..353aa36043 100644 --- a/tests/system-test/2-query/tail.py +++ b/tests/system-test/2-query/tail.py @@ -412,6 +412,18 @@ class TDTestCase: tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 group by c2);") + tdSql.checkRows(1) + + 
tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 partition by c2);") + tdSql.checkRows(1) + + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 order by c2);") + tdSql.checkRows(1) + + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 union select _rowts, first(c2) as a from ct1);") + tdSql.checkRows(1) + def check_boundary_values(self, dbname="bound_test"): tdSql.execute(f"drop database if exists {dbname}") diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index 9b5da50e1f..bfd65c1fe2 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -438,6 +438,15 @@ class TDTestCase: tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) + tdSql.query(f"select unique(c1) v from (select _rowts , c1 from {dbname}.ct1 partition by c2)") + tdSql.checkRows(10) + + tdSql.query(f"select unique(c1) v from (select _rowts , c1 from {dbname}.ct1 order by c2)") + tdSql.checkRows(10) + + tdSql.query(f"select unique(c1) v from (select _rowts , c1 from {dbname}.ct1 union all select _rowts , c1 from {dbname}.ct1)") + tdSql.checkRows(10) + # TD-19911 tdSql.error("select unique(mode(12)) from (select _rowts , t1 , tbname from db.stb1 );") tdSql.error("select unique(mode(t1,1)) from (select _rowts , t1 , tbname from db.stb1 );") From 70e4b4f44eb3b6abbb40d43e59fe24dab4b7b0b6 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 9 Aug 2023 14:30:34 +0800 Subject: [PATCH 061/122] remove unique need of primary ts --- source/libs/function/src/builtins.c | 2 +- tests/system-test/2-query/unique.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index be94352f29..51d51db6e8 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2925,7 +2925,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "unique", .type = FUNCTION_TYPE_UNIQUE, - .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateUnique, .getEnvFunc = getUniqueFuncEnv, .initFunc = uniqueFunctionSetup, diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index bfd65c1fe2..a3238ce8d1 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -438,13 +438,13 @@ class TDTestCase: tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - tdSql.query(f"select unique(c1) v from (select _rowts , c1 from {dbname}.ct1 partition by c2)") + tdSql.query(f"select unique(c1) v from (select c1 from {dbname}.ct1 partition by c2)") tdSql.checkRows(10) - tdSql.query(f"select unique(c1) v from (select _rowts , c1 from {dbname}.ct1 order by c2)") + tdSql.query(f"select unique(c1) v from (select c1 from {dbname}.ct1 order by c2)") tdSql.checkRows(10) - tdSql.query(f"select unique(c1) v from (select _rowts , c1 from {dbname}.ct1 union all select _rowts , c1 from {dbname}.ct1)") + tdSql.query(f"select unique(c1) v from (select c1 from {dbname}.ct1 union all select c1 from {dbname}.ct1)") tdSql.checkRows(10) # TD-19911 From 30cbbc425fdf2a17574c77bea6846ca15af6ba75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:31:17 +0800 Subject: [PATCH 062/122] cos: new update command to build every cmake --- 
cmake/apr-util_CMakeLists.txt.in | 1 + cmake/apr_CMakeLists.txt.in | 5 +---- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 1 + 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index b81745aeef..ee30787cb6 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,6 +9,7 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 037c2ee6cc..fa124de62c 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -7,11 +7,8 @@ ExternalProject_Add(apr-1 #GIT_REPOSITORY https://github.com/apache/apr.git #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - #BINARY_DIR "${CMAKE_BINARY_DIR}/build" BUILD_IN_SOURCE TRUE - #CONFIGURE_COMMAND "" - #BUILD_COMMAND "" - #INSTALL_COMMAND "" + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index e411cd893c..47c4fd72a1 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl + UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 994aa6e2cb..12c9ea7d89 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,6 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install From 5d0edcd17b16bc557087a7f148d9f6b2d69d39d4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:41:23 +0800 Subject: [PATCH 063/122] cos: use /usr/local as prefix instead of debug/build --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 3 ++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 3 ++- contrib/CMakeLists.txt | 4 ++-- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index ee30787cb6..fc4f92858c 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,7 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index fa124de62c..57e2014c31 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR 
"${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 47c4fd72a1..fcd16a0518 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,7 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 12c9ea7d89..cdd3e5b301 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 058b2cf042..507928cbe9 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 93ce558abf20e429980a6901344098bf16a0494c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:49:34 +0800 Subject: [PATCH 064/122] apu: fix with-apr config --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index fc4f92858c..96a8b3ef75 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From bc64d5f769f6d5fdb7baf7b43e81ef2708fa04b8 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 16:53:19 +0800 Subject: [PATCH 065/122] cos: use ~/local as prefix for building --- cmake/apr-util_CMakeLists.txt.in | 4 ++-- cmake/apr_CMakeLists.txt.in | 4 ++-- cmake/curl_CMakeLists.txt.in | 4 ++-- cmake/mxml_CMakeLists.txt.in | 4 ++-- contrib/CMakeLists.txt | 4 ++-- tests/parallel_test/container_build.sh | 1 + 6 files changed, 11 
insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 96a8b3ef75..1ae52c69af 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,8 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 57e2014c31..1df68919ae 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,8 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index fcd16a0518..b09e85b9b2 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,8 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl - CONFIGURE_COMMAND ./configure --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index cdd3e5b301..33dc48ab4e 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,8 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 507928cbe9..cc93226d68 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + file(MAKE_DIRECTORY $ENV{HOME}/local/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 5ae061072a..699a4667dd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -88,6 +88,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v 
/root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 1ce3ef7fd52c3b86c2a2a68481cb10fa9c4b2602 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:02:16 +0800 Subject: [PATCH 066/122] cos: fix local/include directory --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index cc93226d68..9feefa6947 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -379,7 +379,7 @@ if(${BUILD_WITH_COS}) set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) + INCLUDE_DIRECTORIES($ENV{HOME}/local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) From 4911b6c8558beb4948fef3dbd1d6c46dfe630f81 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:07:55 +0800 Subject: [PATCH 067/122] container_build: use local as install dir --- tests/parallel_test/container_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 699a4667dd..8de8f377fd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,6 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 11823580c7fc149268f73bb4bbf16bcbce563fda Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 9 Aug 2023 17:13:31 +0800 Subject: [PATCH 068/122] fix test cases --- source/libs/function/src/builtins.c | 2 +- tests/system-test/2-query/unique.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 51d51db6e8..be94352f29 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2925,7 +2925,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "unique", .type = FUNCTION_TYPE_UNIQUE, - .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, .translateFunc = translateUnique, .getEnvFunc = getUniqueFuncEnv, .initFunc = uniqueFunctionSetup, diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index a3238ce8d1..b3053817d3 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -438,13 +438,13 @@ class TDTestCase: tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - tdSql.query(f"select unique(c1) v from (select c1 from {dbname}.ct1 partition by c2)") + tdSql.query(f"select 
unique(c1) v from (select _rowts, c1 from {dbname}.ct1 partition by c2)") tdSql.checkRows(10) - tdSql.query(f"select unique(c1) v from (select c1 from {dbname}.ct1 order by c2)") + tdSql.query(f"select unique(c1) v from (select _rowts, c1 from {dbname}.ct1 order by c2)") tdSql.checkRows(10) - tdSql.query(f"select unique(c1) v from (select c1 from {dbname}.ct1 union all select c1 from {dbname}.ct1)") + tdSql.query(f"select unique(c1) v from (select _rowts, c1 from {dbname}.ct1 union all select _rowts, c1 from {dbname}.ct1)") tdSql.checkRows(10) # TD-19911 From a5ccc3e8aa39f91a85931129dffc6aeea8e34767 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:53:25 +0800 Subject: [PATCH 069/122] apr: make apr, apu, curl build always --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 4 +++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 15 --------------- 5 files changed, 8 insertions(+), 19 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 1ae52c69af..c64e4ffcdb 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 1df68919ae..bae8cfe0a6 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -8,7 +8,9 @@ ExternalProject_Add(apr-1 #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + UPDATE_DISCONNECTED TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index b09e85b9b2..27457ffdbc 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 33dc48ab4e..f9b7e8e642 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,7 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 9feefa6947..3fb7b93abe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -162,21 +162,6 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() -# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") -# include(ExternalProject) -# ExternalProject_Add(mxml -# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git -# GIT_TAG release-2.10 -# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" -# #BINARY_DIR "" -# BUILD_IN_SOURCE TRUE -# CONFIGURE_COMMAND ./configure 
--prefix=${CMAKE_BINARY_DIR}/build -# BUILD_COMMAND make -# INSTALL_COMMAND make install -# TEST_COMMAND "" -# ) - - # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . From a853d9d40cd9c937417d90692ad62c58e020e486 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:21:55 +0800 Subject: [PATCH 070/122] mxml: use ~/local/include --- source/dnode/vnode/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 6c107e0a22..e6af282d10 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "${CMAKE_BINARY_DIR}/build/include" + PUBLIC "$ENV{HOME}/local/include" ) if(${BUILD_TEST}) From d6ed5fe096f7f6e63b45f8a676d956f4db482fc5 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 9 Aug 2023 20:08:19 +0800 Subject: [PATCH 071/122] fix: timezone and qsort for windows --- source/os/src/osMath.c | 2 +- source/os/src/osTimezone.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/os/src/osMath.c b/source/os/src/osMath.c index 0cff0f78a6..10d02ab25c 100644 --- a/source/os/src/osMath.c +++ b/source/os/src/osMath.c @@ -25,7 +25,7 @@ int32_t qsortHelper(const void* p1, const void* p2, const void* param) { // todo refactor: 1) move away; 2) use merge sort instead; 3) qsort is not a stable sort actually. void taosSort(void* base, int64_t sz, int64_t width, __compar_fn_t compar) { -#if defined(WINDOWS) || defined(_ALPINE) +#if defined(WINDOWS_STASH) || defined(_ALPINE) void* param = compar; taosqsort(base, sz, width, param, qsortHelper); #else diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index cd6ad7cdb5..4280490c68 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -768,7 +768,7 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 keyValue[4] = (keyValue[4] == '+' ? '-' : '+'); keyValue[10] = 0; sprintf(winStr, "TZ=%s:00", &(keyValue[1])); - *tsTimezone = taosStr2Int32(&keyValue[4], NULL, 10); + *tsTimezone = -taosStr2Int32(&keyValue[4], NULL, 10); } break; } @@ -789,7 +789,7 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 indexStr = ppp - pp + 3; } sprintf(&winStr[indexStr], "%c%c%c:%c%c:00", (p[0] == '+' ? 
'-' : '+'), p[1], p[2], p[3], p[4]); - *tsTimezone = taosStr2Int32(p, NULL, 10); + *tsTimezone = -taosStr2Int32(p, NULL, 10); } else { *tsTimezone = 0; } From ca5571d0d6fb05b622e93d90bc3d5f30077ad1ec Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:48:59 +0800 Subject: [PATCH 072/122] config: fix default configs --- source/common/src/tglobal.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index fbc98715f0..91ab9f62d5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -266,6 +266,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg); int32_t taosSetS3Cfg(SConfig *pCfg) { tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + if (tsS3AccessKey[0] == '<') { + return 0; + } char *colon = strchr(tsS3AccessKey, ':'); if (!colon) { uError("invalid access key:%s", tsS3AccessKey); From 1ce8d06032f94f2b1c1ce01a4f455057eef1eebd Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 10 Aug 2023 09:57:57 +0800 Subject: [PATCH 073/122] fix: proj col compare func --- source/libs/parser/src/parTranslater.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 554dc7cce8..38118c03f8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -6596,7 +6596,10 @@ typedef struct SProjColPos { } SProjColPos; static int32_t projColPosCompar(const void* l, const void* r) { - return ((SProjColPos*)l)->colId > ((SProjColPos*)r)->colId; + if (((SProjColPos*)l)->colId < ((SProjColPos*)r)->colId) { + return -1; + } + return ((SProjColPos*)l)->colId == ((SProjColPos*)r)->colId ? 0 : 1; } static void projColPosDelete(void* p) { nodesDestroyNode(((SProjColPos*)p)->pProj); } From b17f4a9c586fe65daffdc8eaee8f8e280ff98b3d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 10 Aug 2023 11:05:18 +0800 Subject: [PATCH 074/122] add test cases --- tests/system-test/2-query/leastsquares.py | 10 +++++----- tests/system-test/2-query/tail.py | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/system-test/2-query/leastsquares.py b/tests/system-test/2-query/leastsquares.py index f433e702ec..3dfd1f6aca 100644 --- a/tests/system-test/2-query/leastsquares.py +++ b/tests/system-test/2-query/leastsquares.py @@ -221,7 +221,7 @@ class TDTestCase: if res is None: tdLog.exit("result is not correct") - def __test_current(self): + def __test_current(self, dbname=DBNAME): # tdSql.query("explain select c1 from {dbname}.ct1") # tdSql.query("explain select 1 from {dbname}.ct2") # tdSql.query("explain select cast(ceil(c6) as bigint) from {dbname}.ct4 group by c6") @@ -229,10 +229,10 @@ class TDTestCase: # tdSql.query("explain select ct2.c3 from {dbname}.ct4 join ct2 on ct4.ts=ct2.ts") # tdSql.query("explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ") # - tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 group by c1)") - tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 partition by c1)") - tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 order by c1)") - tdSql.query("select leastsquares(c1, 1, 1) from (select c1 from nt1 union select c1 from nt1)") + tdSql.query(f"select leastsquares(c1, 1, 1) from (select c1 from {dbname}.nt1 group by c1)") + tdSql.query(f"select leastsquares(c1, 1, 1) from (select c1 from {dbname}.nt1 partition by c1)") + tdSql.query(f"select 
leastsquares(c1, 1, 1) from (select c1 from {dbname}.nt1 order by c1)") + tdSql.query(f"select leastsquares(c1, 1, 1) from (select c1 from {dbname}.nt1 union select c1 from {dbname}.nt1)") self.leastsquares_check() diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py index 353aa36043..99791db3df 100644 --- a/tests/system-test/2-query/tail.py +++ b/tests/system-test/2-query/tail.py @@ -412,16 +412,16 @@ class TDTestCase: tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 group by c2);") + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from {dbname}.ct1 group by c2);") tdSql.checkRows(1) - tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 partition by c2);") + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from {dbname}.ct1 partition by c2);") tdSql.checkRows(1) - tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 order by c2);") + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from {dbname}.ct1 order by c2);") tdSql.checkRows(1) - tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from ct1 union select _rowts, first(c2) as a from ct1);") + tdSql.query(f"select tail(a, 1) from (select _rowts, first(c2) as a from {dbname}.ct1 union select _rowts, first(c2) as a from {dbname}.ct1);") tdSql.checkRows(1) def check_boundary_values(self, dbname="bound_test"): From cd63e814500cdd9138e674831e0bbef664fc2b75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:18:12 +0800 Subject: [PATCH 075/122] cos: separate building phase for apr & apr-util --- contrib/CMakeLists.txt | 42 +++++++++++++++++++++++++------ source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 3fb7b93abe..e8f6c98efe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,35 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + +if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
+ WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -124,11 +153,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/local/) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -361,10 +389,10 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index e6af282d10..3cfcc9b716 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/local/include" + PUBLIC "$ENV{HOME}/.cos-local/include" ) if(${BUILD_TEST}) From bb0b80e42df5c0c3b6af41ccdbd4767ccf3684de Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:24:51 +0800 Subject: [PATCH 076/122] cmake: use .cos-local as prebuilt directory --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c64e4ffcdb..6172be380e 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bae8cfe0a6..538b45a7f9 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 27457ffdbc..1d9d028848 100644 --- a/cmake/curl_CMakeLists.txt.in +++ 
b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index f9b7e8e642..87b126d8d3 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From f3b56a0687e2b4f0127e3eff22787783f4a59154 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:28:23 +0800 Subject: [PATCH 077/122] apu: fix apr-util building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e8f6c98efe..d20b205e69 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -26,7 +26,7 @@ set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) if(${BUILD_WITH_COS}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) endif(${BUILD_WITH_COS}) configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") From b739a422e33da2c44842b9afb4674cd1ded0589d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:31:11 +0800 Subject: [PATCH 078/122] container-build: use .cos-local for prebuilt building --- tests/parallel_test/container_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 8de8f377fd..62254984a9 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v /root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v /root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From c04ada3573e86ddf47a5e5fc67f79557c56b5c66 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 15:28:22 +0800 Subject: [PATCH 079/122] cos: link with static libs --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git 
a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 3cfcc9b716..c036fbc54a 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,6 +135,7 @@ else() endif() endif() +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) From b2e615d4e70a78a8ef80bf4cf3e4ce7af6c9381e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:30:01 +0800 Subject: [PATCH 080/122] enhance: tag scan cursor based block --- include/libs/executor/storageapi.h | 14 +- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 12 +- source/dnode/vnode/src/vnd/vnodeInitApi.c | 4 + source/libs/executor/inc/executil.h | 2 + source/libs/executor/inc/executorInt.h | 4 + source/libs/executor/inc/operator.h | 2 +- source/libs/executor/src/executil.c | 4 +- source/libs/executor/src/operator.c | 2 +- source/libs/executor/src/scanoperator.c | 271 +++++++++++++++++++++- 10 files changed, 298 insertions(+), 19 deletions(-) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 773f373a2d..724d6638db 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -98,6 +98,16 @@ typedef struct SMTbCursor { int8_t paused; } SMTbCursor; +typedef struct SMCtbCursor { + SMeta *pMeta; + void *pCur; + tb_uid_t suid; + void *pKey; + void *pVal; + int kLen; + int vLen; +} SMCtbCursor; + typedef struct SRowBuffPos { void* pRowBuff; void* pKey; @@ -278,13 +288,15 @@ typedef struct SStoreMeta { void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta); - int64_t (*getNumOfRowsInMem)(void* pVnode); /** int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list); int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg); int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list); */ + SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock); + void (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock); + tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur); } SStoreMeta; typedef struct SStoreMetaReader { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index cd7704940b..e3b2d3e41e 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -167,7 +167,7 @@ int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq); int64_t metaGetTimeSeriesNum(SMeta* pMeta); -SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid, int lock); +SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock); void metaCloseCtbCursor(SMCtbCursor* pCtbCur, int lock); tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur); SMStbCursor* metaOpenStbCursor(SMeta* pMeta, tb_uid_t uid); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..31c7bc8500 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -408,17 +408,9 @@ _err: return NULL; } -struct SMCtbCursor { - SMeta *pMeta; - TBC *pCur; - tb_uid_t suid; - void 
*pKey; - void *pVal; - int kLen; - int vLen; -}; -SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) { +SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; SMCtbCursor *pCtbCur = NULL; SCtbIdxKey ctbIdxKey; int ret = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c index 5c8d563d73..dca8dd271c 100644 --- a/source/dnode/vnode/src/vnd/vnodeInitApi.c +++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c @@ -96,6 +96,10 @@ void initMetadataAPI(SStoreMeta* pMeta) { pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup; pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache; + + pMeta->openCtbCursor = metaOpenCtbCursor; + pMeta->closeCtbCursor = metaCloseCtbCursor; + pMeta->ctbCursorNext = metaCtbCursorNext; } void initTqAPI(SStoreTqReader* pTq) { diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 33c9d845b9..f273f63770 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -190,4 +190,6 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag); void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order); void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery); +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, + SStorageAPI* pStorageAPI); #endif // TDENGINE_EXECUTIL_H diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index fbca5e29f9..cadf367481 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -259,6 +259,10 @@ typedef struct STagScanInfo { SLimitNode* pSlimit; SReadHandle readHandle; STableListInfo* pTableListInfo; + uint64_t suid; + void* pCtbCursor; + SNode* pTagCond; + SNode* pTagIndexCond; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index e6c3405d7f..38cefc1cc5 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -81,7 +81,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode*pTagIndexCond, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..5bb8f8a38b 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -47,8 +47,6 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* p static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, 
SArray* pUidTagList, void* pVnode, - SStorageAPI* pStorageAPI); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; } @@ -846,7 +844,7 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S return -1; } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, SStorageAPI* pStorageAPI) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 8ddcc8fd15..0fc1b77b73 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -380,7 +380,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR return NULL; } - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..24ed717c8a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2688,6 +2688,271 @@ static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, } } +static void tagScanFreeUidTag(void* p) { + STUidTagInfo* pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + +static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) { + SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + if (pColumnData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; + pColumnData->info.precision = pType->precision; + + int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows, true); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pColumnData); + return terrno; + } + + pParam->columnData = pColumnData; + pParam->colAlloced = true; + return TSDB_CODE_SUCCESS; +} + +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + +static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { + SColumnNode* pSColumnNode = NULL; + if (QUERY_NODE_COLUMN == nodeType((*pNode))) { + pSColumnNode = *(SColumnNode**)pNode; + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { + SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); + if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { + pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pSColumnNode) { + return DEAL_RES_ERROR; + } + pSColumnNode->colId = -1; + pSColumnNode->colType = COLUMN_TYPE_TBNAME; + pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR; + pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; + nodesDestroyNode(*pNode); + *pNode = (SNode*)pSColumnNode; + } else { 
+ return DEAL_RES_CONTINUE; + } + } else { + return DEAL_RES_CONTINUE; + } + + STagScanFilterContext* pCtx = (STagScanFilterContext*)pContext; + void* data = taosHashGet(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + if (!data) { + taosHashPut(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); + pSColumnNode->slotId = pCtx->index++; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; + taosArrayPush(pCtx->cInfoList, &cInfo); + } else { + SColumnNode* col = *(SColumnNode**)data; + pSColumnNode->slotId = col->slotId; + } + + return DEAL_RES_CONTINUE; +} + + +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { + int32_t code = 0; + int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); + + SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + if (pResBlock == NULL) { + + } + + SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; + + SScalarParam output = {0}; + code = tagScanCreateResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + } + + bool* result = (bool*)output.columnData->pData; + for (int32_t i = 0 ; i < numOfTables; ++i) { + if (result[i]) { + taosArrayPush(aUidTagIdxs, &i); + } + } + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); +} + +static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { + if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(str, "zsl"); + // if (pUidTagInfo->name != NULL) { + // STR_TO_VARSTR(str, pUidTagInfo->name); + // } else { // name is not retrieved during filter + // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); + // } + + colDataSetVal(pColInfo, rowIndex, str, false); + } else { + STagVal tagVal = {0}; + tagVal.cid = pExprInfo->base.pParam[0].pCol->colId; + if (pUidTagInfo->pTagVal == NULL) { + colDataSetNULL(pColInfo, rowIndex); + } else { + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataSetNULL(pColInfo, rowIndex); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataSetVal(pColInfo, rowIndex, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataSetVal(pColInfo, rowIndex, tmp, false); + taosMemoryFree(tmp); + } else { + 
colDataSetVal(pColInfo, rowIndex, (const char*)&tagVal.i64, false); + } + } + } +} + +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } + } + return 0; +} + +#if 0 +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + int32_t nTbls = taosArrayGetSize(aUidTags); + for (int i = 0; i < nTbls; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + + // refactor later + if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { + char str[512]; + + STR_TO_VARSTR(str, "zsl"); + colDataSetVal(pDst, (i), str, false); + } else { // it is a tag value + STagVal val = {0}; + val.cid = pExprInfo[j].base.pParam[0].pCol->colId; + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); + + char* data = NULL; + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { + data = tTagValToData((const STagVal*)p, false); + } else { + data = (char*)p; + } + colDataSetVal(pDst, i, data, + (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); + + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && + data != NULL) { + taosMemoryFree(data); + } + } + } + } + return 0; +} +#endif + + +static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStorageAPI* pAPI = &pTaskInfo->storageAPI; + + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + SSDataBlock* pRes = pInfo->pRes; + blockDataCleanup(pRes); + int32_t count = 0; + + if (pInfo->pCtbCursor == NULL) { + pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); + } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { + while (count < pOperator->resultInfo.capacity) { + SMCtbCursor* pCur = pInfo->pCtbCursor; + tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); + if (uid == 0) { + break; + } + STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; + info.pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(info.pTagVal, pCur->pVal, pCur->vLen); + taosArrayPush(aUidTags, &info); + } + + int32_t numTables = taosArrayGetSize(aUidTags); + if (numTables != 0 && pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); + } + tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, 
pAPI); + if (taosArrayGetSize(aUidTagIdxs) != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aUidTagIdxs); + } + taosArrayDestroy(aUidTagIdxs); + taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; + return (pRes->info.rows == 0) ? NULL : pInfo->pRes; +} + static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2753,7 +3018,7 @@ static void destroyTagScanOperatorInfo(void* param) { } SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, - STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { + STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -2774,7 +3039,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } - + pInfo->pTagCond = pTagCond; + pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; @@ -2789,6 +3055,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + pInfo->suid = pPhyNode->suid; return pOperator; _error: From 20f5e2af5b5f6742466e82611f2e54278af6d776 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:40:54 +0800 Subject: [PATCH 081/122] continue coding and save work --- source/libs/executor/src/scanoperator.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 24ed717c8a..42c488edbe 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2913,7 +2913,6 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SStorageAPI* pAPI = &pTaskInfo->storageAPI; STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); int32_t count = 0; @@ -2941,6 +2940,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); } tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + count = taosArrayGetSize(aUidTagIdxs); + if (taosArrayGetSize(aUidTagIdxs) != 0) { break; } From 7085d6bc11d7c8fb7ef18258273bccdee5817337 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 18:49:17 +0800 Subject: [PATCH 082/122] mxml: disable shared lib --- cmake/mxml_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 87b126d8d3..7377f81c33 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From 
4513acfee925eaebf27b1486561afe3afab0ffe4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 09:54:54 +0800 Subject: [PATCH 083/122] cos: use static libs for mxml, apr, apu, curl --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 7 ++++--- source/dnode/vnode/CMakeLists.txt | 2 +- tests/parallel_test/container_build.sh | 4 ++-- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 6172be380e..d98a381005 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 538b45a7f9..18c4eb62a1 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 1d9d028848..5f1efc1e5a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 7377f81c33..9dcb5df665 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index d20b205e69..452192a288 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -10,7 +10,7 @@ set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -389,10 +389,11 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) 
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
       option(ENABLE_TEST "Enable the tests" OFF)
-      INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include)
+      INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
+      MESSAGE("$ENV{HOME}/.cos-local.1/include")
       set(CMAKE_BUILD_TYPE debug)
       set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index c036fbc54a..684134c2d6 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -196,7 +196,7 @@ include_directories (${APR_INCLUDE_DIR})
 target_include_directories(
     vnode
     PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
-    PUBLIC "$ENV{HOME}/.cos-local/include"
+    PUBLIC "$ENV{HOME}/.cos-local.1/include"
 )

 if(${BUILD_TEST})
diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh
index 62254984a9..f5e426057e 100755
--- a/tests/parallel_test/container_build.sh
+++ b/tests/parallel_test/container_build.sh
@@ -60,7 +60,7 @@ docker run \
     -v /root/.cargo/git:/root/.cargo/git \
     -v /root/go/pkg/mod:/root/go/pkg/mod \
     -v /root/.cache/go-build:/root/.cache/go-build \
-    -v /root/.cos-local:/root/.cos-local \
+    -v /root/.cos-local.1:/root/.cos-local.1 \
     -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \
     -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \
     -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \
@@ -89,7 +89,7 @@ docker run \
     -v /root/.cargo/git:/root/.cargo/git \
     -v /root/go/pkg/mod:/root/go/pkg/mod \
     -v /root/.cache/go-build:/root/.cache/go-build \
-    -v /root/.cos-local:/root/.cos-local \
+    -v /root/.cos-local.1:/root/.cos-local.1 \
     -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \
     -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \
     -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \

From 104ead6783d3f5225a8d53c8df07de46c2ac2f9b Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Fri, 11 Aug 2023 10:06:42 +0800
Subject: [PATCH 084/122] apu: fix apr location

---
 cmake/apr-util_CMakeLists.txt.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in
index d98a381005..5a68020dd7 100644
--- a/cmake/apr-util_CMakeLists.txt.in
+++ b/cmake/apr-util_CMakeLists.txt.in
@@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1
     BUILD_IN_SOURCE TRUE
     BUILD_ALWAYS 1
     #UPDATE_COMMAND ""
-    CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local
+    CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
     #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
     BUILD_COMMAND make
     INSTALL_COMMAND make install

From ffbdcde8eb22cc81aa862263901e5864775e8ff8 Mon Sep 17 00:00:00 2001
From: Shungang Li
Date: Fri, 11 Aug 2023 10:59:17 +0800
Subject: [PATCH 085/122] docs: update for cluster cfg

---
 docs/en/10-deployment/01-deploy.md | 15 ++++++++-------
 docs/zh/10-deployment/01-deploy.md | 21 +++++++++++----------
 2 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index da00e21a7e..ec9162c859 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -62,12 +62,13 @@ serverPort 6030
 For all 
the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster. -| **#** | **Parameter** | **Definition** | -| ----- | ------------------ | ------------------------------------------- | -| 1 | statusInterval | The interval by which dnode reports its status to mnode | -| 2 | timezone | Timezone | -| 3 | locale | System region and encoding | -| 4 | charset | Character set | +| **#** | **Parameter** | **Definition** | +| ----- | ---------------- | ----------------------------------------------------------------------------- | +| 1 | statusInterval | The interval by which dnode reports its status to mnode | +| 2 | timezone | Timezone | +| 3 | locale | System region and encoding | +| 4 | charset | Character set | +| 5 | ttlChangeOnWrite | Whether the ttl expiration time changes with the table modification operation | ## Start Cluster @@ -97,7 +98,7 @@ Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI ` CREATE DNODE "h2.taos.com:6030"; ```` -This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode. +This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode. Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos` diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md index b68bf7b743..0ffbb8467b 100644 --- a/docs/zh/10-deployment/01-deploy.md +++ b/docs/zh/10-deployment/01-deploy.md @@ -62,12 +62,13 @@ serverPort 6030 加入到集群中的数据节点 dnode,下表中涉及集群相关的参数必须完全相同,否则不能成功加入到集群中。 -| **#** | **配置参数名称** | **含义** | -| ----- | ------------------ | ------------------------------------------- | -| 1 | statusInterval | dnode 向 mnode 报告状态时长 | -| 2 | timezone | 时区 | -| 3 | locale | 系统区位信息及编码格式 | -| 4 | charset | 字符集编码 | +| **#** | **配置参数名称** | **含义** | +| ----- | ---------------- | ------------------------------------ | +| 1 | statusInterval | dnode 向 mnode 报告状态时长 | +| 2 | timezone | 时区 | +| 3 | locale | 系统区位信息及编码格式 | +| 4 | charset | 字符集编码 | +| 5 | ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变 | ## 启动集群 @@ -196,10 +197,10 @@ dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增 1、建立集群时使用 CREATE DNODE 增加新节点后,新节点始终显示 offline 状态? ```sql 1)首先要检查增加的新节点上的 taosd 服务是否已经正常启动 - + 2)如果已经启动,再检查到新节点的网络是否通畅,可以使用 ping fqdn 验证下 - + 3)如果前面两步都没有问题,这一步要检查新节点做为独立集群在运行了,可以使用 taos -h fqdn 连接上后,show dnodes; 命令查看. 
- 如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上 + 如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上 taos.cfg 中配置的 dataDir 目录下的所有文件,重新启动新节点服务即可解决。 -``` +``` From 1d089fe085bd00e75ce389311a8e362e0f9e61a2 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 11 Aug 2023 11:34:57 +0800 Subject: [PATCH 086/122] fix: dup read lock on windows --- source/dnode/vnode/src/meta/metaQuery.c | 23 ++++++++++++----------- source/libs/executor/src/scanoperator.c | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..9c5ab2fbb4 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1432,37 +1432,38 @@ int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo, SMetaReader *pR int32_t code = 0; void *pData = NULL; int nData = 0; - int lock = 0; + bool lock = false; - metaRLock(pMeta); + if (pReader && !(pReader->flags & META_READER_NOLOCK)) { + lock = true; + } + + if(!lock) metaRLock(pMeta); // search cache if (metaCacheGet(pMeta, uid, pInfo) == 0) { - metaULock(pMeta); + if(!lock) metaULock(pMeta); goto _exit; } // search TDB if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { // not found - metaULock(pMeta); + if(!lock) metaULock(pMeta); code = TSDB_CODE_NOT_FOUND; goto _exit; } - metaULock(pMeta); + if(!lock) metaULock(pMeta); pInfo->uid = uid; pInfo->suid = ((SUidIdxVal *)pData)->suid; pInfo->version = ((SUidIdxVal *)pData)->version; pInfo->skmVer = ((SUidIdxVal *)pData)->skmVer; - if (pReader != NULL) { - lock = !(pReader->flags & META_READER_NOLOCK); - if (lock) { - metaULock(pReader->pMeta); - // metaReaderReleaseLock(pReader); - } + if (lock) { + metaULock(pReader->pMeta); + // metaReaderReleaseLock(pReader); } // upsert the cache metaWLock(pMeta); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..9a836caa8c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -504,7 +504,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int // 1. check if it is existed in meta cache if (pCache == NULL) { - pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_NOLOCK, &pHandle->api.metaFn); + pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn); code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid); if (code != TSDB_CODE_SUCCESS) { // when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed. 
From 64a1636dd20bfafafd4faeccde8a964e7a9ade4f Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 11 Aug 2023 12:54:45 +0800 Subject: [PATCH 087/122] chore: trigger CI --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 9c5ab2fbb4..41fc3b3d7e 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1432,10 +1432,10 @@ int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo, SMetaReader *pR int32_t code = 0; void *pData = NULL; int nData = 0; - bool lock = false; + int lock = 0; if (pReader && !(pReader->flags & META_READER_NOLOCK)) { - lock = true; + lock = 1; } if(!lock) metaRLock(pMeta); From 7c39bc989083ce501dd6df5bd980b4edaab07057 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 13:50:41 +0800 Subject: [PATCH 088/122] fix: some minor modifications --- source/libs/executor/inc/executorInt.h | 1 + source/libs/executor/src/scanoperator.c | 77 ++++++++++++++----------- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cadf367481..2b25feabb3 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SStorageAPI* pStorageAPI; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 42c488edbe..5e0eb71c13 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2767,9 +2767,10 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); @@ -2777,48 +2778,42 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aU nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); - if (pResBlock == NULL) { - - } SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; SScalarParam output = {0}; - code = tagScanCreateResultData(&type, numOfTables, &output); - if (code != TSDB_CODE_SUCCESS) { + tagScanCreateResultData(&type, numOfTables, &output); - } - - code = scalarCalculate(pTagCond, pBlockList, &output); - if (code != TSDB_CODE_SUCCESS) { - } + scalarCalculate(pTagCond, pBlockList, &output); bool* result = (bool*)output.columnData->pData; for (int32_t i = 0 ; i < numOfTables; ++i) { if (result[i]) { - taosArrayPush(aUidTagIdxs, &i); + taosArrayPush(aFilterIdxs, &i); } } - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); - blockDataDestroy(pResBlock); - taosArrayDestroy(pBlockList); 
colDataDestroy(output.columnData); taosMemoryFreeClear(output.columnData); + + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; +// if (pUidTagInfo->name != NULL) { +// STR_TO_VARSTR(str, pUidTagInfo->name); +// } else { // name is not retrieved during filter +// pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); +// } STR_TO_VARSTR(str, "zsl"); - // if (pUidTagInfo->name != NULL) { - // STR_TO_VARSTR(str, pUidTagInfo->name); - // } else { // name is not retrieved during filter - // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); - // } colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2846,13 +2841,15 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); @@ -2920,8 +2917,10 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { while (count < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2936,20 +2935,26 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { } int32_t numTables = taosArrayGetSize(aUidTags); - if (numTables != 0 && pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); - } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); - count = taosArrayGetSize(aUidTagIdxs); - - if (taosArrayGetSize(aUidTagIdxs) != 0) { + if (numTables == 0) { break; } + + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); + count = taosArrayGetSize(aFilterIdxs); + + if (count != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aUidTagIdxs); + taosArrayClear(aFilterIdxs); } - 
taosArrayDestroy(aUidTagIdxs); + + taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } @@ -3012,6 +3017,9 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { static void destroyTagScanOperatorInfo(void* param) { STagScanInfo* pInfo = (STagScanInfo*)param; + if (pInfo->pCtbCursor != NULL) { + pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); + } pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3043,6 +3051,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; + pInfo->pStorageAPI = &pTaskInfo->storageAPI; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; From 1c7f854a719b590f4aa5d201d2543681d1e28975 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:47:28 +0800 Subject: [PATCH 089/122] enhance: add only meta ctb index to tag scan physi node --- include/libs/nodes/plannodes.h | 14 +++- source/libs/nodes/src/nodesCloneFuncs.c | 11 ++- source/libs/nodes/src/nodesCodeFuncs.c | 70 ++++++++++++++++++- source/libs/nodes/src/nodesMsgFuncs.c | 89 +++++++++++++++++++++++++ 4 files changed, 180 insertions(+), 4 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 063318332a..0830dc4918 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -334,7 +334,19 @@ typedef struct SScanPhysiNode { bool groupOrderScan; } SScanPhysiNode; -typedef SScanPhysiNode STagScanPhysiNode; +typedef struct STagScanPhysiNode { + // SScanPhysiNode scan; //TODO? + SPhysiNode node; + SNodeList* pScanCols; + SNodeList* pScanPseudoCols; + uint64_t uid; // unique id of the table + uint64_t suid; + int8_t tableType; + SName tableName; + bool groupOrderScan; + bool onlyMetaCtbIdx; //no tbname, tag index not used. 
+} STagScanPhysiNode; + typedef SScanPhysiNode SBlockDistScanPhysiNode; typedef struct SLastRowScanPhysiNode { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index f5eacf0bd5..965af41fa7 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,7 +564,16 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - return physiScanCopy(pSrc, pDst); + COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); + CLONE_NODE_LIST_FIELD(pScanCols); + CLONE_NODE_LIST_FIELD(pScanPseudoCols); + COPY_SCALAR_FIELD(uid); + COPY_SCALAR_FIELD(suid); + COPY_SCALAR_FIELD(tableType); + COPY_OBJECT_FIELD(tableName, sizeof(SName)); + COPY_SCALAR_FIELD(groupOrderScan); + COPY_SCALAR_FIELD(onlyMetaCtbIdx); + return TSDB_CODE_SUCCESS; } static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..3540f8cb70 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1562,7 +1562,7 @@ static const char* jkScanPhysiPlanTableName = "TableName"; static const char* jkScanPhysiPlanGroupOrderScan = "GroupOrderScan"; static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { - const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + const SScanPhysiNode* pNode = (const SScanPhysiNode*)pObj; int32_t code = physicPlanNodeToJson(pObj, pJson); if (TSDB_CODE_SUCCESS == code) { @@ -1591,7 +1591,7 @@ static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { } static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { - STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + SScanPhysiNode* pNode = (SScanPhysiNode*)pObj; int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { @@ -1619,6 +1619,70 @@ static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; + +static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = physicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = 
jsonToPhysicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; +} + static const char* jkLastRowScanPhysiPlanGroupTags = "GroupTags"; static const char* jkLastRowScanPhysiPlanGroupSort = "GroupSort"; @@ -6590,6 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return physiTableScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: @@ -6908,6 +6973,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_LOGIC_PLAN: return jsonToLogicPlan(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return jsonToPhysiTagScanNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: return jsonToPhysiScanNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 20e829766d..4d1120861d 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2003,6 +2003,91 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { return code; } +enum { + PHY_TAG_SCAN_CODE_BASE_NODE = 1, + PHY_TAG_SCAN_CODE_SCAN_COLS, + PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, + PHY_TAG_SCAN_CODE_BASE_UID, + PHY_TAG_SCAN_CODE_BASE_SUID, + PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, + PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, + PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX +}; + +static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, 
PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case PHY_TAG_SCAN_CODE_BASE_NODE: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); + break; + case PHY_TAG_SCAN_CODE_SCAN_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); + break; + case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); + break; + case PHY_TAG_SCAN_CODE_BASE_UID: + code = tlvDecodeU64(pTlv, &pNode->uid); + break; + case PHY_TAG_SCAN_CODE_BASE_SUID: + code = tlvDecodeU64(pTlv, &pNode->suid); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: + code = tlvDecodeI8(pTlv, &pNode->tableType); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: + code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); + break; + case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: + code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + break; + case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: + code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); + break; + default: + break; + } + } + + return code; +} + enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, @@ -3726,6 +3811,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = caseWhenNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = physiTagScanNodeToMsg(pObj, pEncoder); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = physiScanNodeToMsg(pObj, pEncoder); break; @@ -3869,6 +3956,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { code = msgToCaseWhenNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = msgToPhysiTagScanNode(pDecoder, pObj); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = msgToPhysiScanNode(pDecoder, pObj); break; From a0c62d215d36bc980aabe3ebb12ee32521db2c43 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:54:43 +0800 Subject: [PATCH 090/122] enhance: tag scan only meta ctb idx backend modification --- source/libs/executor/src/operator.c | 19 ++++++++++--------- source/libs/executor/src/scanoperator.c | 9 ++++++--- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 0fc1b77b73..d0805a86e4 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -370,17 +370,18 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode; pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { - STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; + STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); - int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, 
pTableListInfo, pTagCond, - pTagIndexCond, pTaskInfo); - if (code != TSDB_CODE_SUCCESS) { - pTaskInfo->code = code; - qError("failed to getTableList, code: %s", tstrerror(code)); - return NULL; + if (!pTagScanPhyNode->onlyMetaCtbIdx) { + int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + pTagIndexCond, pTaskInfo); + if (code != TSDB_CODE_SUCCESS) { + pTaskInfo->code = code; + qError("failed to getTableList, code: %s", tstrerror(code)); + return NULL; + } } - - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pTagScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5e0eb71c13..107ea14914 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3048,10 +3048,13 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } + pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; - pInfo->pTableListInfo = pTableListInfo; + pInfo->suid = pPhyNode->suid; pInfo->pStorageAPI = &pTaskInfo->storageAPI; + + pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; @@ -3062,10 +3065,10 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScan; pOperator->fpSet = - createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); - pInfo->suid = pPhyNode->suid; return pOperator; _error: From 8987553d9c8cc2c86f8d575ab3bc6431f9b399eb Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 11 Aug 2023 15:56:38 +0800 Subject: [PATCH 091/122] fix: remove ins_modules --- include/libs/nodes/nodes.h | 2 +- source/common/src/systable.c | 2 +- source/dnode/mnode/impl/src/mndShow.c | 2 ++ source/libs/nodes/src/nodesCodeFuncs.c | 2 ++ source/libs/nodes/src/nodesUtilFuncs.c | 4 ++-- source/libs/parser/src/parAstParser.c | 2 ++ source/libs/parser/src/parAuthenticator.c | 2 +- source/libs/parser/src/parTranslater.c | 4 +++- 8 files changed, 14 insertions(+), 6 deletions(-) diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 2319643b09..8eeeff4148 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -169,7 +169,7 @@ typedef enum ENodeType { QUERY_NODE_REVOKE_STMT, QUERY_NODE_SHOW_DNODES_STMT, QUERY_NODE_SHOW_MNODES_STMT, - QUERY_NODE_SHOW_MODULES_STMT, +// QUERY_NODE_SHOW_MODULES_STMT, QUERY_NODE_SHOW_QNODES_STMT, QUERY_NODE_SHOW_SNODES_STMT, QUERY_NODE_SHOW_BNODES_STMT, diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 0940fcef6a..eb8042099f 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -314,7 +314,7 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = { static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, - {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, + // {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true}, {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true}, {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 44f4751700..7d842be7de 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -58,8 +58,10 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_DNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_MNODES, len) == 0) { type = TSDB_MGMT_TABLE_MNODE; +/* } else if (strncasecmp(name, TSDB_INS_TABLE_MODULES, len) == 0) { type = TSDB_MGMT_TABLE_MODULE; +*/ } else if (strncasecmp(name, TSDB_INS_TABLE_QNODES, len) == 0) { type = TSDB_MGMT_TABLE_QNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_SNODES, len) == 0) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..263d1b21ab 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -197,8 +197,10 @@ const char* nodesNodeName(ENodeType type) { return "ShowDnodesStmt"; case QUERY_NODE_SHOW_MNODES_STMT: return "ShowMnodesStmt"; +/* case QUERY_NODE_SHOW_MODULES_STMT: return "ShowModulesStmt"; +*/ case QUERY_NODE_SHOW_QNODES_STMT: return "ShowQnodesStmt"; case QUERY_NODE_SHOW_SNODES_STMT: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index c8197721fb..75b63b9d34 100644 --- 
a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -406,7 +406,7 @@ SNode* nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SRevokeStmt)); case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: @@ -982,7 +982,7 @@ void nodesDestroyNode(SNode* pNode) { break; case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index fdec9cba79..86b4566d37 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -690,8 +690,10 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_MNODES_STMT: return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt); +/* case QUERY_NODE_SHOW_MODULES_STMT: return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt); +*/ case QUERY_NODE_SHOW_QNODES_STMT: return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_SNODES_STMT: diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 9b2ac662c8..6a26dcfa8b 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -263,7 +263,7 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authAlterTable(pCxt, (SAlterTableStmt*)pStmt); case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 38118c03f8..a41447edf3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -92,6 +92,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +/* { .showType = QUERY_NODE_SHOW_MODULES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -99,6 +100,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +*/ { .showType = QUERY_NODE_SHOW_QNODES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -9164,7 +9166,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_USERS_STMT: case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_FUNCTIONS_STMT: case QUERY_NODE_SHOW_INDEXES_STMT: From 84452c8deefc8ca600dcefdff907f6bc4608edcc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 16:36:53 +0800 Subject: [PATCH 092/122] cos: use static sdk --- cmake/curl_CMakeLists.txt.in | 2 +- source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 5f1efc1e5a..856d42257a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in 
@@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 684134c2d6..a07e38e53b 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -162,7 +162,7 @@ target_link_libraries( PUBLIC index # s3 - cos_c_sdk + cos_c_sdk_static ${APR_UTIL_LIBRARY} ${APR_LIBRARY} ${MINIXML_LIBRARY} From 6530658815346945aaa333326ab72f54067182c4 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:05:59 +0800 Subject: [PATCH 093/122] fix: continue coding --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index d0805a86e4..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -373,7 +373,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { - int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = code; From e1971cf0a023e1f8e155a27e0fc1e92e2c56b415 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 17:18:38 +0800 Subject: [PATCH 094/122] curl: disable brotli with static lib --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 856d42257a..0fe0c2256f 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install From 7b22d37f123234d1c1fbed53cb1be45d72922635 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 17:25:43 +0800 Subject: [PATCH 095/122] fix:lost data when consumer db --- include/libs/wal/wal.h | 7 +- source/dnode/vnode/src/inc/tq.h | 2 +- source/dnode/vnode/src/tq/tqRead.c | 44 ++++----- source/dnode/vnode/src/tq/tqUtil.c | 12 +-- source/libs/wal/src/walRead.c | 142 ++++------------------------- 5 files changed, 39 insertions(+), 168 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index b19a0d783d..1f7323a06a 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -153,7 +153,6 @@ struct SWalReader { int64_t capacity; TdThreadMutex mutex; SWalFilterCond cond; - // TODO remove it SWalCkHead *pHead; }; @@ -208,9 +207,9 @@ void walReaderVerifyOffset(SWalReader *pWalReader, 
STqOffsetVal* pOffset) // only for tq usage void walSetReaderCapacity(SWalReader *pRead, int32_t capacity); -int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead); -int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead); -int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead); +int32_t walFetchHead(SWalReader *pRead, int64_t ver); +int32_t walFetchBody(SWalReader *pRead); +int32_t walSkipFetchBody(SWalReader *pRead); void walRefFirstVer(SWal *, SWalRef *); void walRefLastVer(SWal *, SWalRef *); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index a6a84075b5..f08c308185 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -127,7 +127,7 @@ void tqDestroyTqHandle(void* data); // tqRead int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset); int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset); -int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum, uint64_t reqId); +int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId); // tqExec int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..6c091fa4cb 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -184,50 +184,42 @@ end: return tbSuid == realTbSuid; } -int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead, uint64_t reqId) { - int32_t code = 0; +int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) { + int32_t code = -1; int32_t vgId = TD_VID(pTq->pVnode); - taosThreadMutexLock(&pHandle->pWalReader->mutex); int64_t offset = *fetchOffset; + int64_t lastVer = walGetLastVer(pHandle->pWalReader->pWal); + int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal); + int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal); - while (1) { - if (walFetchHead(pHandle->pWalReader, offset, *ppCkHead) < 0) { + wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64, + vgId, offset, lastVer, committedVer, appliedVer); + + while (offset <= appliedVer) { + if (walFetchHead(pHandle->pWalReader, offset) < 0) { tqDebug("tmq poll: consumer:0x%" PRIx64 ", (epoch %d) vgId:%d offset %" PRId64 ", no more log to return, reqId:0x%" PRIx64, pHandle->consumerId, pHandle->epoch, vgId, offset, reqId); - *fetchOffset = offset; - code = -1; goto END; } tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64, vgId, - pHandle->consumerId, offset, TMSG_INFO((*ppCkHead)->head.msgType), reqId); + pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId); - if ((*ppCkHead)->head.msgType == TDMT_VND_SUBMIT) { - code = walFetchBody(pHandle->pWalReader, ppCkHead); - - if (code < 0) { - *fetchOffset = offset; - code = -1; - goto END; - } - *fetchOffset = offset; - code = 0; + if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) { + code = walFetchBody(pHandle->pWalReader); goto END; } else { if (pHandle->fetchMeta != WITH_DATA) { - SWalCont* pHead = &((*ppCkHead)->head); + SWalCont* pHead = &(pHandle->pWalReader->pHead->head); if (IS_META_MSG(pHead->msgType) && 
!(pHead->msgType == TDMT_VND_DELETE && pHandle->fetchMeta == ONLY_META)) { - code = walFetchBody(pHandle->pWalReader, ppCkHead); + code = walFetchBody(pHandle->pWalReader); if (code < 0) { - *fetchOffset = offset; - code = -1; goto END; } if (isValValidForTable(pHandle, pHead)) { - *fetchOffset = offset; code = 0; goto END; } else { @@ -236,10 +228,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea } } } - code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead); + code = walSkipFetchBody(pHandle->pWalReader); if (code < 0) { - *fetchOffset = offset; - code = -1; goto END; } offset++; @@ -247,7 +237,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea } END: - taosThreadMutexUnlock(&pHandle->pWalReader->mutex); + *fetchOffset = offset; return code; } diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 5cbca6e0f2..42aac52c63 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -179,7 +179,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, SRpcMsg* pMsg, STqOffsetVal* offset) { int code = 0; int32_t vgId = TD_VID(pTq->pVnode); - SWalCkHead* pCkHead = NULL; SMqMetaRsp metaRsp = {0}; STaosxRsp taosxRsp = {0}; tqInitTaosxRsp(&taosxRsp, *offset); @@ -216,12 +215,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, if (offset->type == TMQ_OFFSET__LOG) { walReaderVerifyOffset(pHandle->pWalReader, offset); int64_t fetchVer = offset->version; - pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048); - if (pCkHead == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = -1; - goto end; - } walSetReaderCapacity(pHandle->pWalReader, 2048); int totalRows = 0; @@ -234,14 +227,14 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, break; } - if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) { + if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer); // setRequestVersion(&taosxRsp.reqOffset, offset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; } - SWalCont* pHead = &pCkHead->head; + SWalCont* pHead = &pHandle->pWalReader->pHead->head; tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType); @@ -291,7 +284,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, end: tDeleteSTaosxRsp(&taosxRsp); - taosMemoryFreeClear(pCkHead); return code; } diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 54b9576eb1..01404494e3 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -16,10 +16,6 @@ #include "taoserror.h" #include "walInt.h" -static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer); -static int32_t walFetchBodyNew(SWalReader *pRead); -static int32_t walSkipFetchBodyNew(SWalReader *pRead); - SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) { SWalReader *pReader = taosMemoryCalloc(1, sizeof(SWalReader)); if (pReader == NULL) { @@ -80,19 +76,19 @@ int32_t walNextValidMsg(SWalReader *pReader) { return -1; } while (fetchVer <= appliedVer) { - if (walFetchHeadNew(pReader, fetchVer) < 0) { + if (walFetchHead(pReader, fetchVer) < 0) { return -1; } int32_t type = pReader->pHead->head.msgType; if 
(type == TDMT_VND_SUBMIT || ((type == TDMT_VND_DELETE) && (pReader->cond.deleteMsg == 1)) || (IS_META_MSG(type) && pReader->cond.scanMeta)) { - if (walFetchBodyNew(pReader) < 0) { + if (walFetchBody(pReader) < 0) { return -1; } return 0; } else { - if (walSkipFetchBodyNew(pReader) < 0) { + if (walSkipFetchBody(pReader) < 0) { return -1; } @@ -256,102 +252,7 @@ int32_t walReaderSeekVer(SWalReader *pReader, int64_t ver) { void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity = capacity; } -static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { - int64_t contLen; - bool seeked = false; - - wDebug("vgId:%d, wal starts to fetch head, index:%" PRId64, pRead->pWal->cfg.vgId, fetchVer); - - if (pRead->curVersion != fetchVer) { - if (walReaderSeekVer(pRead, fetchVer) < 0) { - return -1; - } - seeked = true; - } - - while (1) { - contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead)); - if (contLen == sizeof(SWalCkHead)) { - break; - } else if (contLen == 0 && !seeked) { - if(walReadSeekVerImpl(pRead, fetchVer) < 0){ - return -1; - } - seeked = true; - continue; - } else { - if (contLen < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - } else { - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - } - return -1; - } - } -// pRead->curInvalid = 0; - return 0; -} - -static int32_t walFetchBodyNew(SWalReader *pReader) { - SWalCont *pReadHead = &pReader->pHead->head; - int64_t ver = pReadHead->version; - - wDebug("vgId:%d, wal starts to fetch body, ver:%" PRId64 " ,len:%d, total", pReader->pWal->cfg.vgId, ver, - pReadHead->bodyLen); - - if (pReader->capacity < pReadHead->bodyLen) { - SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); - if (ptr == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - pReader->pHead = ptr; - pReadHead = &pReader->pHead->head; - pReader->capacity = pReadHead->bodyLen; - } - - if (pReadHead->bodyLen != taosReadFile(pReader->pLogFile, pReadHead->body, pReadHead->bodyLen)) { - if (pReadHead->bodyLen < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s", - pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver, tstrerror(terrno)); - } else { - wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted", - pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - } - return -1; - } - - if (walValidBodyCksum(pReader->pHead) != 0) { - wError("vgId:%d, wal fetch body error:%" PRId64 ", since body checksum not passed", pReader->pWal->cfg.vgId, ver); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - return -1; - } - - wDebug("vgId:%d, index:%" PRId64 " is fetched, type:%d, cursor advance", pReader->pWal->cfg.vgId, ver, pReader->pHead->head.msgType); - pReader->curVersion = ver + 1; - return 0; -} - -static int32_t walSkipFetchBodyNew(SWalReader *pRead) { - int64_t code; - - code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); - if (code < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); -// pRead->curInvalid = 1; - return -1; - } - - pRead->curVersion++; - wDebug("vgId:%d, version advance to %" PRId64 ", skip fetch", pRead->pWal->cfg.vgId, pRead->curVersion); - - return 0; -} - -int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { +int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t code; int64_t contLen; bool seeked = false; @@ -369,15 +270,13 @@ 
int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { if (pRead->curVersion != ver) { code = walReaderSeekVer(pRead, ver); if (code < 0) { -// pRead->curVersion = ver; -// pRead->curInvalid = 1; return -1; } seeked = true; } while (1) { - contLen = taosReadFile(pRead->pLogFile, pHead, sizeof(SWalCkHead)); + contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead)); if (contLen == sizeof(SWalCkHead)) { break; } else if (contLen == 0 && !seeked) { @@ -392,12 +291,11 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } } - code = walValidHeadCksum(pHead); + code = walValidHeadCksum(pRead->pHead); if (code != 0) { wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver); @@ -405,32 +303,27 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { return -1; } -// pRead->curInvalid = 0; return 0; } -int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) { - int64_t code; - +int32_t walSkipFetchBody(SWalReader *pRead) { wDebug("vgId:%d, skip fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 ", applied ver:%" PRId64, - pRead->pWal->cfg.vgId, pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, + pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer); - code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR); + int64_t code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); -// pRead->curInvalid = 1; return -1; } pRead->curVersion++; - return 0; } -int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { - SWalCont *pReadHead = &((*ppHead)->head); +int32_t walFetchBody(SWalReader *pRead) { + SWalCont *pReadHead = &pRead->pHead->head; int64_t ver = pReadHead->version; wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 @@ -439,13 +332,13 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->vers.appliedVer); if (pRead->capacity < pReadHead->bodyLen) { - SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(*ppHead, sizeof(SWalCkHead) + pReadHead->bodyLen); + SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); if (ptr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - *ppHead = ptr; - pReadHead = &((*ppHead)->head); + pRead->pHead = ptr; + pReadHead = &pRead->pHead->head; pRead->capacity = pReadHead->bodyLen; } @@ -459,27 +352,24 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->cfg.vgId, pReadHead->version, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } if (pReadHead->version != ver) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId, pReadHead->version, ver); -// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } - if (walValidBodyCksum(*ppHead) != 0) { + if (walValidBodyCksum(pRead->pHead) != 0) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); -// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } - 
pRead->curVersion = ver + 1; + pRead->curVersion++; return 0; } From 47d2f9ad6df4385e0cb912841204e94cff1d4ed0 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:52:52 +0800 Subject: [PATCH 096/122] fix: first run without tag cond --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- source/dnode/vnode/src/vnd/vnodeQuery.c | 6 +++--- source/libs/executor/src/operator.c | 1 + source/libs/executor/src/scanoperator.c | 15 +++++++++++---- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 31c7bc8500..39c3dfa080 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -427,7 +427,7 @@ SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { metaRLock(pMeta); } - ret = tdbTbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL); + ret = tdbTbcOpen(pMeta->pCtbIdx, (TBC**)&pCtbCur->pCur, NULL); if (ret < 0) { metaULock(pMeta); taosMemoryFree(pCtbCur); @@ -1365,7 +1365,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { } int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) { - SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1); // If len > 0 means there already have uids, and we only want the // tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 51f4cee40c..48f8ec021d 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -440,7 +440,7 @@ int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) { } int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, uid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -462,7 +462,7 @@ int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) { SVnode *pVnodeObj = pVnode; - SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -521,7 +521,7 @@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo } int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0); if (!pCur) { return TSDB_CODE_FAILED; } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 7f0c5baa36..31998d13b6 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,6 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; + pTagScanPhyNode->onlyMetaCtbIdx = true; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, 
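The scanoperator.c hunk below rebuilds doTagScanFromCtbIdx around a batched cursor loop: fetch up to the operator's result capacity of child-table tag entries, filter the batch by the tag condition, and return only when some rows survive or the cursor is exhausted. The key fix is the pass-through branch: without it, the filtered index list stayed empty whenever no tag condition was set, so the fill step emitted no rows at all (the "first run without tag cond" in the subject). A condensed sketch of that control flow; scan_batch, next_fn, and pred_fn are stand-in names, not the executor's real types:

/*
 * Batched cursor scan: fill a batch, filter it, and refill while the
 * filter rejects everything.  A NULL predicate keeps the whole batch,
 * mirroring the no-tag-condition path added below.  Hypothetical names;
 * assumes cap <= 64 and that `out` has room for cap items.
 */
#include <stdbool.h>
#include <stddef.h>

typedef int  (*next_fn)(void *cur);   /* returns 0 once the cursor is done */
typedef bool (*pred_fn)(int item);    /* tag-condition stand-in */

size_t scan_batch(void *cur, next_fn next, pred_fn cond, int *out, size_t cap) {
  size_t kept = 0;

  while (kept == 0) {
    int    buf[64];
    size_t n = 0;

    while (n < cap && n < 64) {       /* pull one batch from the cursor */
      int item = next(cur);
      if (item == 0) break;           /* cursor exhausted */
      buf[n++] = item;
    }
    if (n == 0) break;                /* nothing scanned: give up */

    for (size_t i = 0; i < n; ++i) {  /* cond == NULL keeps every entry */
      if (cond == NULL || cond(buf[i])) out[kept++] = buf[i];
    }                                 /* kept == 0: filter ate the batch, retry */
  }
  return kept;                        /* rows produced for this call */
}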
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 107ea14914..5f8bf03d80 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2922,7 +2922,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); while (1) { - while (count < pOperator->resultInfo.capacity) { + int32_t numTables = 0; + while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); if (uid == 0) { @@ -2932,14 +2933,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { info.pTagVal = taosMemoryMalloc(pCur->vLen); memcpy(info.pTagVal, pCur->pVal, pCur->vLen); taosArrayPush(aUidTags, &info); + ++numTables; } - int32_t numTables = taosArrayGetSize(aUidTags); if (numTables == 0) { break; } - - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + if (pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + } else { + for (int i = 0; i < numTables; ++i) { + taosArrayPush(aFilterIdxs, &i); + } + } tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); count = taosArrayGetSize(aFilterIdxs); @@ -2955,6 +2961,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } From 767563752a033eb0cc18f508eb75c802204189d4 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 20:21:13 +0800 Subject: [PATCH 097/122] opti:wal logic --- source/dnode/vnode/src/tq/tqRead.c | 2 ++ source/dnode/vnode/src/tq/tqUtil.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6c091fa4cb..e74146bab2 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -224,6 +224,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t goto END; } else { offset++; + code = -1; continue; } } @@ -234,6 +235,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t } offset++; } + code = -1; } END: diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 42aac52c63..b7fd505784 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -216,7 +216,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, walReaderVerifyOffset(pHandle->pWalReader, offset); int64_t fetchVer = offset->version; - walSetReaderCapacity(pHandle->pWalReader, 2048); int totalRows = 0; while (1) { int32_t savedEpoch = atomic_load_32(&pHandle->epoch); From f1baed79b4eb6f136b814204fff906ac0b5969cf Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 20:22:51 +0800 Subject: [PATCH 098/122] opti:rm useless code --- include/libs/wal/wal.h | 1 - source/libs/wal/src/walRead.c | 1 - 2 files changed, 2 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 1f7323a06a..cfe70a186c 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -206,7 +206,6 @@ void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64 void 
walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset); // only for tq usage -void walSetReaderCapacity(SWalReader *pRead, int32_t capacity); int32_t walFetchHead(SWalReader *pRead, int64_t ver); int32_t walFetchBody(SWalReader *pRead); int32_t walSkipFetchBody(SWalReader *pRead); diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 01404494e3..d9e43e4324 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -250,7 +250,6 @@ int32_t walReaderSeekVer(SWalReader *pReader, int64_t ver) { return 0; } -void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity = capacity; } int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t code; From edd2fa4f351c3c3434c2143822b0a188a1c305d1 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:17:43 +0800 Subject: [PATCH 099/122] fix: pass compilation and simple test --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5f8bf03d80..ac20bae167 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2940,7 +2940,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { break; } if (pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { for (int i = 0; i < numTables; ++i) { taosArrayPush(aFilterIdxs, &i); From f83bfec067deb4de1ab9e98434094f0d0e20a8cf Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:28:25 +0800 Subject: [PATCH 100/122] fix: change only meta ctb idx back to false --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 31998d13b6..abef8298e5 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = true; + pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, From efb472d54af8a8046245757c522ff91f3c3339d0 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Sat, 12 Aug 2023 10:15:40 +0800 Subject: [PATCH 101/122] fix:heap use after free --- source/dnode/vnode/src/tq/tqRead.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index e74146bab2..252a0642fa 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -219,6 +219,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t goto END; } + pHead = &(pHandle->pWalReader->pHead->head); if (isValValidForTable(pHandle, pHead)) { code = 0; goto END; From c4bd52d434c2e12f59d869857083775903d97ce3 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Sun, 13 Aug 2023 18:14:22 +0800 Subject: 
[PATCH 102/122] fix:disable test case 5dnode3mnodeRoll --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c8b8ce87fd..a946a7feaf 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -456,7 +456,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 From 6688d70ba41400c83e9686e0ca6565dc861b1327 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sun, 13 Aug 2023 18:46:55 +0800 Subject: [PATCH 103/122] fix: fix planner test error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 3540f8cb70..a2de0bc63a 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -6654,7 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: - return physiTableScanNodeToJson(pObj, pJson); + return physiTagScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: From 0aa35987ab91f1e41a8799eaee2570817dc270e6 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 14 Aug 2023 09:45:04 +0800 Subject: [PATCH 104/122] Update 5dnode3mnodeRoll.py --- tests/system-test/6-cluster/5dnode3mnodeRoll.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 38ac47f777..9d62eb3b4b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -27,7 +27,7 @@ import threading import time import json -BASEVERSION = "3.1.0.0" +BASEVERSION = "3.1.1.0" class TDTestCase: From c97b9249fc4e08667648d28ab577828ff8640dd7 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 14:38:28 +0800 Subject: [PATCH 105/122] cos: only for linux --- cmake/cmake.options | 4 ++ contrib/CMakeLists.txt | 9 +++-- source/common/src/tglobal.c | 2 + source/dnode/vnode/CMakeLists.txt | 56 +++++++++++++++------------ source/dnode/vnode/src/vnd/vnodeCos.c | 14 +++++++ 5 files changed, 57 insertions(+), 28 deletions(-) diff --git a/cmake/cmake.options b/cmake/cmake.options index ea5efcb13a..1d4e9ba515 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,12 +125,16 @@ option( ON ) +IF(${TD_LINUX}) + option( BUILD_WITH_COS "If build with cos" ON ) +ENDIF () + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 452192a288..e3e48ac3a1 
100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,8 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +if(${TD_LINUX}) + set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -35,6 +37,8 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +endif(${TD_LINUX}) + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -388,7 +392,7 @@ endif() # cos if(${BUILD_WITH_COS}) - if(NOT ${TD_WINDOWS}) + if(${TD_LINUX}) set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) @@ -406,10 +410,9 @@ if(${BUILD_WITH_COS}) ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) - else() - endif(NOT ${TD_WINDOWS}) + endif(${TD_LINUX}) endif(${BUILD_WITH_COS}) # lucene diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 2447e02698..3595347db3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -292,7 +292,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { } } if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { +#ifdef USE_COS tsS3Enabled = true; +#endif } return 0; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a07e38e53b..052b6be37f 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,12 +135,6 @@ else() endif() endif() -set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) - target_link_libraries( vnode PUBLIC os @@ -160,28 +154,24 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index - - # s3 - cos_c_sdk_static - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} ) -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(vnode PUBLIC grant) -ENDIF () +if(${TD_LINUX}) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) +target_link_libraries( + vnode -target_compile_definitions(vnode PUBLIC -DMETA_REFACT) - -if(${BUILD_WITH_INVERTEDINDEX}) - add_definitions(-DUSE_INVERTED_INDEX) -endif(${BUILD_WITH_INVERTEDINDEX}) - -if(${BUILD_WITH_ROCKSDB}) - add_definitions(-DUSE_ROCKSDB) -endif(${BUILD_WITH_ROCKSDB}) + # s3 + PUBLIC cos_c_sdk_static + PUBLIC ${APR_UTIL_LIBRARY} + PUBLIC ${APR_LIBRARY} + PUBLIC ${MINIXML_LIBRARY} + PUBLIC ${CURL_LIBRARY} +) # s3 FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) @@ -199,6 +189,22 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +endif(${TD_LINUX}) + +IF (TD_GRANT) + TARGET_LINK_LIBRARIES(vnode PUBLIC grant) +ENDIF () + +target_compile_definitions(vnode PUBLIC -DMETA_REFACT) + +if(${BUILD_WITH_INVERTEDINDEX}) + add_definitions(-DUSE_INVERTED_INDEX) +endif(${BUILD_WITH_INVERTEDINDEX}) + +if(${BUILD_WITH_ROCKSDB}) + add_definitions(-DUSE_ROCKSDB) +endif(${BUILD_WITH_ROCKSDB}) + 
if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a40e046972..52d5fc2b1b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -12,6 +12,7 @@ extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; +#ifdef USE_COS int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; @@ -294,3 +295,16 @@ long s3Size(const char *object_name) { return size; } + +#else + +int32_t s3Init() { return 0; } +void s3CleanUp() {} +void s3PutObjectFromFile(const char *file, const char *object) {} +void s3DeleteObjects(const char *object_name[], int nobject) {} +bool s3Exists(const char *object_name) { return false; } +bool s3Get(const char *object_name, const char *path) { return false; } +void s3EvictCache(const char *path, long object_size) {} +long s3Size(const char *object_name) { return 0; } + +#endif From 989abc2bf6264ea6c2837c3daa44027581405807 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 15:03:17 +0800 Subject: [PATCH 106/122] vnode/cos: move includes into USE_COS --- source/dnode/vnode/src/vnd/vnodeCos.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 52d5fc2b1b..b28b7ad747 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -2,10 +2,6 @@ #include "vndCos.h" -#include "cos_api.h" -#include "cos_http_io.h" -#include "cos_log.h" - extern char tsS3Endpoint[]; extern char tsS3AccessKeyId[]; extern char tsS3AccessKeySecret[]; @@ -13,6 +9,10 @@ extern char tsS3BucketName[]; extern char tsS3AppId[]; #ifdef USE_COS +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; From 26951294cfa76ef77a1b185346a9cf6637982967 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 14 Aug 2023 15:10:18 +0800 Subject: [PATCH 107/122] Update cases.task --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a946a7feaf..0a65340976 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -456,7 +456,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 From 5c276fa547c0b5ba9a7a7332968ecc89f2b51105 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 11 Aug 2023 17:15:17 +0800 Subject: [PATCH 108/122] fix: make kill query work for sysscanoperator --- source/libs/executor/inc/querytask.h | 2 +- source/libs/executor/inc/tsort.h | 3 ++- source/libs/executor/src/querytask.c 
| 2 +- source/libs/executor/src/scanoperator.c | 5 +++-- source/libs/executor/src/sysscanoperator.c | 5 +++++ source/libs/executor/src/tsort.c | 13 ++++++++++--- 6 files changed, 22 insertions(+), 8 deletions(-) diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index 7241b015a0..0742b9ba4c 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -99,7 +99,7 @@ struct SExecTaskInfo { void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst); SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI); void doDestroyTask(SExecTaskInfo* pTaskInfo); -bool isTaskKilled(SExecTaskInfo* pTaskInfo); +bool isTaskKilled(void* pTaskInfo); void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode); void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index 57c8bce275..3180173ca7 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -191,7 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols); bool tsortIsClosed(SSortHandle* pHandle); void tsortSetClosed(SSortHandle* pHandle); -void setSingleTableMerge(SSortHandle* pHandle); +void tsortSetSingleTableMerge(SSortHandle* pHandle); +void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), void* param); #ifdef __cplusplus } diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index 22d171e74a..980ef1a61a 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -59,7 +59,7 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP return pTaskInfo; } -bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); } +bool isTaskKilled(void* pTaskInfo) { return (0 != ((SExecTaskInfo*)pTaskInfo)->code); } void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) { pTaskInfo->code = rspCode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..a3c5a76a7f 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2928,8 +2928,9 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_BLOCK_TS_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0); - + tsortSetMergeLimit(pInfo->pSortHandle, mergeLimit); + tsortSetAbortCheckFn(pInfo->pSortHandle, isTaskKilled, pOperator->pTaskInfo); } tsortSetFetchRawDataFp(pInfo->pSortHandle, getBlockForTableMergeScan, NULL, NULL); @@ -2949,7 +2950,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t code = TSDB_CODE_SUCCESS; if (numOfTable == 1) { - setSingleTableMerge(pInfo->pSortHandle); + tsortSetSingleTableMerge(pInfo->pSortHandle); } else { code = tsortOpen(pInfo->pSortHandle); } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index a1f83dda2f..9048dd43d7 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1601,6 +1601,11 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { 
SSysTableScanInfo* pInfo = pOperator->info; char dbName[TSDB_DB_NAME_LEN] = {0}; + if (isTaskKilled(pOperator->pTaskInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + blockDataCleanup(pInfo->pRes); const char* name = tNameGetTableName(&pInfo->name); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1891e93c61..6c4a780dfb 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -71,12 +71,20 @@ struct SSortHandle { SMultiwayMergeTreeInfo* pMergeTree; bool singleTableMerge; + + bool (*abortCheckFn)(void* param); + void* abortCheckParam; }; -void setSingleTableMerge(SSortHandle* pHandle) { +void tsortSetSingleTableMerge(SSortHandle* pHandle) { pHandle->singleTableMerge = true; } +void tsortSetAbortCheckFn(SSortHandle *pHandle, bool (*checkFn)(void *), void* param) { + pHandle->abortCheckFn = checkFn; + pHandle->abortCheckParam = param; +} + static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param); // | offset[0] | offset[1] |....| nullbitmap | data |...| @@ -726,11 +734,10 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { - if (tsortIsClosed(pHandle)) { + if (tsortIsClosed(pHandle) || (pHandle->abortCheckFn && pHandle->abortCheckFn(pHandle->abortCheckParam))) { code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED; return code; } - SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { break; From 57d1957dee1d8a738dfe0f2fe3efd9716433a280 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 15:57:27 +0800 Subject: [PATCH 109/122] enhance: tag scan code refactoring --- source/libs/executor/inc/executorInt.h | 2 + source/libs/executor/src/scanoperator.c | 110 +++++++++--------------- 2 files changed, 43 insertions(+), 69 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 2b25feabb3..cb066d809c 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,8 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SArray* aUidTags; // SArray + SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; } STagScanInfo; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ac20bae167..71352b1c6e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2813,7 +2813,7 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo // } else { // name is not retrieved during filter // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); // } - STR_TO_VARSTR(str, "zsl"); + STR_TO_VARSTR(str, "ctbidx"); colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2841,66 +2841,32 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, bool ignoreFilterIdx, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - size_t szTables = taosArrayGetSize(aFilterIdxs); - for (int i = 0; i < szTables; ++i) { - int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); - 
STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + if (!ignoreFilterIdx) { + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } } - } - return 0; -} - -#if 0 -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, - SStorageAPI* pAPI) { - STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - int32_t nTbls = taosArrayGetSize(aUidTags); - for (int i = 0; i < nTbls; ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - - // refactor later - if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { - char str[512]; - - STR_TO_VARSTR(str, "zsl"); - colDataSetVal(pDst, (i), str, false); - } else { // it is a tag value - STagVal val = {0}; - val.cid = pExprInfo[j].base.pParam[0].pCol->colId; - const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); - - char* data = NULL; - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { - data = tTagValToData((const STagVal*)p, false); - } else { - data = (char*)p; - } - colDataSetVal(pDst, i, data, - (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); - - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && - data != NULL) { - taosMemoryFree(data); - } + } else { + size_t szTables = taosArrayGetSize(aUidTags); + for (int i = 0; i < szTables; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); } } } return 0; } -#endif - static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { @@ -2912,16 +2878,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { STagScanInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); - int32_t count = 0; if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } - SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aUidTags = pInfo->aUidTags; + SArray* aFilterIdxs = pInfo->aFilterIdxs; + int32_t count = 0; while (1) { + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aFilterIdxs); + int32_t numTables = 0; while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = 
pInfo->pCtbCursor; @@ -2939,34 +2908,29 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (numTables == 0) { break; } + bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { + ignoreFilterIdx = false; tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { - for (int i = 0; i < numTables; ++i) { - taosArrayPush(aFilterIdxs, &i); - } + ignoreFilterIdx = true; } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); - count = taosArrayGetSize(aFilterIdxs); + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, ignoreFilterIdx, pAPI); + + count = ignoreFilterIdx ? taosArrayGetSize(aUidTags): taosArrayGetSize(aFilterIdxs); if (count != 0) { break; } - - taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aFilterIdxs); } - - taosArrayDestroy(aFilterIdxs); - taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); - + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } -static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { +static SSDataBlock* doTagScanFromMetaEntry(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } @@ -3027,6 +2991,10 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } + + taosArrayDestroy(pInfo->aFilterIdxs); + taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); + pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3072,7 +3040,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScan; + if (pPhyNode->onlyMetaCtbIdx) { + pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + } + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); From 60191bef9eb67c3ff2c7c2281d35b06e3178c145 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 16:24:56 +0800 Subject: [PATCH 110/122] fix:tmq interface & remove snapshot code --- docs/en/14-reference/03-connector/04-java.mdx | 3 -- docs/examples/c/tmq_example.c | 6 --- docs/examples/go/sub/main.go | 1 - docs/zh/08-connector/10-cpp.mdx | 9 ++++ examples/c/tmq.c | 5 -- include/client/taos.h | 54 ++++++++----------- 6 files changed, 31 insertions(+), 47 deletions(-) diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index 69bbd287ed..b12ef38ea8 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); - this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); @@ -1176,7 +1174,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c index d958428b8f..ca7c23e93b 100644 --- a/docs/examples/c/tmq_example.c +++ b/docs/examples/c/tmq_example.c @@ -227,12 +227,6 @@ tmq_t* build_consumer() { return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index cb24e351ab..ed335cfdea 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -35,7 +35,6 @@ func main() { "td.connect.port": "6030", "client.id": "test_tmq_client", "enable.auto.commit": "false", - "experimental.snapshot.enable": "true", "msg.with.table.name": "true", }) if err != nil { diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx index 9c5095f09c..53c4bca755 100644 --- a/docs/zh/08-connector/10-cpp.mdx +++ b/docs/zh/08-connector/10-cpp.mdx @@ -515,3 +515,12 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。 + +### 数据订阅 API + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` + +**功能说明** +该接口将行协议的文本数据写入到 TDengine 中。 diff --git a/examples/c/tmq.c b/examples/c/tmq.c index e1133c109e..136c54b874 100644 --- 
a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -228,11 +228,6 @@ tmq_t* build_consumer() { tmq_conf_destroy(conf); return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/include/client/taos.h b/include/client/taos.h index 3cc2d907ab..a8136461f8 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -260,19 +260,14 @@ typedef struct tmq_t tmq_t; typedef struct tmq_conf_t tmq_conf_t; typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); +typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); -DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); +typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); - -DLL_EXPORT const char *tmq_err2str(int32_t code); - -/* ------------------------TMQ CONSUMER INTERFACE------------------------ */ typedef struct tmq_topic_assignment { int32_t vgId; int64_t currentOffset; @@ -280,6 +275,18 @@ typedef struct tmq_topic_assignment { int64_t end; } tmq_topic_assignment; +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); +DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); @@ -289,34 +296,17 @@ DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); -DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, - int32_t *numOfAssignment); +DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); +DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); DLL_EXPORT const 
char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); -DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); -DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); - -/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ - -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; - -typedef enum tmq_conf_res_t tmq_conf_res_t; - -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); - -/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */ +DLL_EXPORT const char *tmq_err2str(int32_t code); /* ------------------------------ TAOSX -----------------------------------*/ // note: following apis are unstable From f9c897221cc66cd49bfeaa0905e802a305aece6a Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 16:30:31 +0800 Subject: [PATCH 111/122] fix: move the setting of onlyCtbIdx to front end --- source/libs/executor/src/operator.c | 1 - source/libs/planner/src/planPhysiCreater.c | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index abef8298e5..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 06859e195d..8efa9c1048 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -511,6 +511,20 @@ static int32_t createSimpleScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSub return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, pScan, pPhyNode); } +static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, + SPhysiNode** pPhyNode) { + STagScanPhysiNode* pScan = + (STagScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN); + if (NULL == pScan) { + return TSDB_CODE_OUT_OF_MEMORY; + } + vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + + pScan->onlyMetaCtbIdx = false; + + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); +} + static int32_t createLastRowScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { SLastRowScanPhysiNode* pScan = @@ -646,6 +660,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, pCxt->hasScan = true; switch (pScanLogicNode->scanType) { case SCAN_TYPE_TAG: + 
return createTagScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_BLOCK_INFO: return createSimpleScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_TABLE_COUNT: From 8d0461e98ca13a58c4bee1b9964a1ee3920b5346 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 14 Aug 2023 16:31:58 +0800 Subject: [PATCH 112/122] fix: use taos_static for tmq_sim on windows --- cmake/cmake.define | 6 ++++++ tests/script/wtest.bat | 14 +++++++++++--- utils/test/c/CMakeLists.txt | 2 +- utils/tsim/CMakeLists.txt | 2 +- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index cf7f450994..6f4153c7d0 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -78,6 +78,12 @@ ELSE () SET(TD_TAOS_TOOLS TRUE) ENDIF () +IF (${TD_WINDOWS}) + SET(TAOS_LIB taos_static) +ELSE () + SET(TAOS_LIB taos) +ENDIF () + IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index b642bad285..88ae703b7c 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -17,6 +17,9 @@ rem echo SIM_DIR: %SIM_DIR% set "TSIM_DIR=%SIM_DIR%tsim\" rem echo TSIM_DIR: %TSIM_DIR% +set "DATA_DIR=%TSIM_DIR%data\" +rem echo DATA_DIR: %DATA_DIR% + set "CFG_DIR=%TSIM_DIR%cfg\" rem echo CFG_DIR: %CFG_DIR% @@ -30,25 +33,30 @@ if not exist %SIM_DIR% mkdir %SIM_DIR% if not exist %TSIM_DIR% mkdir %TSIM_DIR% if exist %CFG_DIR% rmdir /s/q %CFG_DIR% if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if exist %DATA_DIR% rmdir /s/q %DATA_DIR% if not exist %CFG_DIR% mkdir %CFG_DIR% if not exist %LOG_DIR% mkdir %LOG_DIR% +if not exist %DATA_DIR% mkdir %DATA_DIR% set "fqdn=localhost" for /f "skip=1" %%A in ( 'wmic computersystem get caption' ) do if not defined fqdn set "fqdn=%%A" -echo firstEp %fqdn% > %TAOS_CFG% +echo firstEp %fqdn%:7100 > %TAOS_CFG% +echo secondEp %fqdn%:7200 >> %TAOS_CFG% echo fqdn %fqdn% >> %TAOS_CFG% echo serverPort 7100 >> %TAOS_CFG% +echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% echo numOfLogLines 100000000 >> %TAOS_CFG% echo rpcDebugFlag 143 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo cDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% echo qDebugFlag 143 >> %TAOS_CFG% -echo udebugFlag 135 >> %TAOS_CFG% +echo uDebugFlag 143 >> %TAOS_CFG% +echo debugFlag 143 >> %TAOS_CFG% echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 3f52fc8e5d..b96814c13b 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -31,7 +31,7 @@ target_link_libraries( ) target_link_libraries( tmq_sim - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt index c2cf7ac3c5..209982c659 100644 --- a/utils/tsim/CMakeLists.txt +++ b/utils/tsim/CMakeLists.txt @@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC) add_executable(tsim ${TSIM_SRC}) target_link_libraries( tsim - PUBLIC taos_static + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os From 62cad7c54f9a1784a03949c152c0bdb684f21316 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 17:16:11 +0800 Subject: [PATCH 113/122] docs:tmq interface --- docs/en/07-develop/07-tmq.mdx | 88 ++++++++++++++++++++++----------- docs/zh/07-develop/07-tmq.mdx | 88 
+++++++++++++++++++++++++++----------
 docs/zh/08-connector/10-cpp.mdx | 9 ----
 3 files changed, 121 insertions(+), 64 deletions(-)

diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index ccf39ef581..3326164f49 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
 To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.

-Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
+Tips (using the C interface as an example):
+1. A consumer group consumes all data under the same topic; different consumer groups are independent of each other.
+2. A consumer group consumes all vgroups of the same topic. The group can be composed of multiple consumers, but each vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume data.
+3. On the server side, only one offset is saved per vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups.
+4. Each poll returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in that block can be obtained with tmq_get_vgroup_offset.
+5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data.
+6. If a consumer terminates abnormally (without calling tmq_consumer_close), it takes about 12 seconds to trigger a rebalance of its consumer group; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. A consumer that exits normally is deleted right after exiting. A newly added consumer triggers a rebalance after about 2 seconds, and its status on the server changes to ready.
+7. A consumer group rebalance reassigns vgroups to all consumers of the group that are in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for.
+8. A consumer can call tmq_position to obtain the offset of its current consumption, seek to a specified offset, and consume again.
+9. Seek moves the position to the specified offset without performing a commit; once the seek succeeds, poll returns the data at the specified offset and after it.
+10. Before seeking, the consumer must call tmq_get_topic_assignment to obtain its vgroup IDs and offset ranges; the seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not (a sketch of this workflow follows this list).
+11. Because of the WAL expiration/deletion mechanism, the offset may already have expired by the time data is polled, even if the seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version.
+12. tmq_get_vgroup_offset returns the offset of the first record in the result block; when seeking to that offset, all data in the block is consumed (see point 4).
+13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. You therefore need to set a reasonable value for the parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes the data in a timely way, so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
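For illustration, points 8-12 combine into a short replay routine. The sketch below is a minimal example under stated assumptions: `tmq` is an already-subscribed consumer, `replay_topic_from_begin` is a hypothetical helper name, the 1000 ms poll timeout is a placeholder, and error handling is reduced to early returns.

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Rewind every vgroup of `topic` to its smallest valid offset, then poll once.
// Assumes `tmq` has already subscribed to `topic`.
static int32_t replay_topic_from_begin(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *pAssignment = NULL;
  int32_t               numOfAssignment = 0;

  // 1. Get the vgroup IDs and offset ranges assigned to this consumer (point 10).
  int32_t code = tmq_get_topic_assignment(tmq, topic, &pAssignment, &numOfAssignment);
  if (code != 0) {
    printf("get assignment failed: %s\n", tmq_err2str(code));
    return code;
  }

  // 2. Seek each vgroup back to `begin`; seek does not commit (point 9).
  for (int32_t i = 0; i < numOfAssignment; ++i) {
    code = tmq_offset_seek(tmq, topic, pAssignment[i].vgId, pAssignment[i].begin);
    if (code != 0) {
      printf("seek vgId:%d failed: %s\n", pAssignment[i].vgId, tmq_err2str(code));
    }
  }
  tmq_free_assignment(pAssignment);

  // 3. Poll again: each result block belongs to one vgroup, and
  //    tmq_get_vgroup_offset returns the offset of its first record (point 4).
  TAOS_RES *pRes = tmq_consumer_poll(tmq, 1000);  // 1000 ms timeout is a placeholder
  if (pRes != NULL) {
    printf("vgId:%d, block first offset:%" PRId64 "\n", tmq_get_vgroup_id(pRes),
           tmq_get_vgroup_offset(pRes));
    taos_free_result(pRes);
  }
  return 0;
}
```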
## Data Schema and API @@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows: ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char 
*pTopicName, int32_t vgId, int64_t offset);
+ DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+ DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+
+ DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+ DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+ DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
+ DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
+ DLL_EXPORT const char *tmq_err2str(int32_t code);
```
-For more information, see [C/C++ Connector](/reference/connector/cpp).
-
 The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.

diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index 38b91d7cea..6852d5551e 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -25,7 +25,20 @@ import CDemo from "./_sub_c.mdx";
 本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。

-注意:数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似。
+说明(以c接口为例):
+1. 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立;
+2. 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据;
+3. 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联;
+4. 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 tmq_get_vgroup_offset 接口获得是该block第一条记录的offset;
+5. 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据;
+6. 消费者如果异常终止(没有调用tmq_consumer_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready;
+7. 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作;
+8. 消费者可利用 tmq_position 获得当前消费的offset,并seek到指定offset,重新消费;
+9. seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据;
+10. seek 操作之前须调用 tmq_get_topic_assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错;
+11. tmq_get_vgroup_offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点;
+12. 由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费;
+13.
数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似; ## 主要数据结构和 API @@ -35,39 +48,60 @@ import CDemo from "./_sub_c.mdx"; ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t 
*numOfAssignment);
+ DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
+ DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
+ DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+ DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+
+ DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+ DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+ DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
+ DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
+ DLL_EXPORT const char *tmq_err2str(int32_t code);
```
-这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。

diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx
index 53c4bca755..9c5095f09c 100644
--- a/docs/zh/08-connector/10-cpp.mdx
+++ b/docs/zh/08-connector/10-cpp.mdx
@@ -515,12 +515,3 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。
 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。
 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。
-
-### 数据订阅 API
-
-除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。
-
-- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
-
-**功能说明**
-该接口将行协议的文本数据写入到 TDengine 中。

From 90cb67d006693a86cfc52b8675f2a98f451be537 Mon Sep 17 00:00:00 2001
From: dapan1121
Date: Mon, 14 Aug 2023 17:33:13 +0800
Subject: [PATCH 114/122] fix: table count issue

---
 tests/develop-test/2-query/table_count_scan.py   | 16 ++++++++--------
 tests/script/tsim/query/sys_tbname.sim           |  2 +-
 tests/script/tsim/query/tableCount.sim           |  6 +++---
 tests/system-test/0-others/information_schema.py |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py
index 758d28948d..d6e49964eb 100644
--- a/tests/develop-test/2-query/table_count_scan.py
+++ b/tests/develop-test/2-query/table_count_scan.py
@@ -65,7 +65,7 @@ class TDTestCase:
         tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;')
         tdSql.checkRows(3)
-        tdSql.checkData(0, 0, 24)
+        tdSql.checkData(0, 0, 23)
         tdSql.checkData(0, 1, 'information_schema')
         tdSql.checkData(0, 2, None)
         tdSql.checkData(1, 0, 3)
@@ -77,7 +77,7 @@ class TDTestCase:
         tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;')
         tdSql.checkRows(3)
-        tdSql.checkData(0, 0, 24)
+        tdSql.checkData(0, 0, 23)
         tdSql.checkData(0, 1, 'information_schema')
         tdSql.checkData(0, 2, None)
         tdSql.checkData(1, 0, 5)
@@ -93,7 +93,7 @@ class TDTestCase:
         tdSql.checkData(1, 1, 'performance_schema')
         tdSql.checkData(0, 0, 3)
         tdSql.checkData(0, 1, 'tbl_count')
-        tdSql.checkData(2, 0, 24)
+        tdSql.checkData(2, 0, 23)
         tdSql.checkData(2, 1, 'information_schema')

         tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'")
@@ -106,7 +106,7 @@ class TDTestCase:
         tdSql.query('select count(*) from information_schema.ins_tables')
         tdSql.checkRows(1)
-
tdSql.checkData(0, 0, 32) + tdSql.checkData(0, 0, 31) tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') @@ -189,7 +189,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -204,7 +204,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -228,7 +228,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 33) + tdSql.checkData(0, 0, 32) tdSql.execute('drop database tbl_count') diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index f49a8e0a7d..6ed978aa15 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -58,7 +58,7 @@ endi sql select tbname from information_schema.ins_tables; print $rows $data00 -if $rows != 33 then +if $rows != 32 then return -1 endi if $data00 != @ins_tables@ then diff --git a/tests/script/tsim/query/tableCount.sim b/tests/script/tsim/query/tableCount.sim index 6e65852dcc..524b43620c 100644 --- a/tests/script/tsim/query/tableCount.sim +++ b/tests/script/tsim/query/tableCount.sim @@ -53,7 +53,7 @@ sql select stable_name,count(table_name) from information_schema.ins_tables grou if $rows != 3 then return -1 endi -if $data01 != 30 then +if $data01 != 29 then return -1 endi if $data11 != 10 then @@ -72,7 +72,7 @@ endi if $data11 != 5 then return -1 endi -if $data21 != 24 then +if $data21 != 23 then return -1 endi if $data31 != 5 then @@ -97,7 +97,7 @@ endi if $data42 != 3 then return -1 endi -if $data52 != 24 then +if $data52 != 23 then return -1 endi if $data62 != 5 then diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 762361f051..f7788d1d50 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -55,7 +55,7 @@ class TDTestCase: ] self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - self.ins_list = ['ins_dnodes','ins_mnodes','ins_modules','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ + self.ins_list = ['ins_dnodes','ins_mnodes','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ 'ins_indexes','ins_stables','ins_tables','ins_tags','ins_columns','ins_users','ins_grants','ins_vgroups','ins_configs','ins_dnode_variables',\ 'ins_topics','ins_subscriptions','ins_streams','ins_stream_tasks','ins_vnodes','ins_user_privileges'] self.perf_list = ['perf_connections','perf_queries','perf_consumers','perf_trans','perf_apps'] From 012248b68142496f0e8e2fa1833b4df4912b2c82 Mon Sep 17 00:00:00 2001 From: 
slzhou Date: Mon, 14 Aug 2023 19:26:53 +0800 Subject: [PATCH 115/122] fix: move the only ctb idx flag to logical plan --- include/libs/nodes/plannodes.h | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 10 ++++++++-- source/libs/planner/src/planOptimizer.c | 3 +++ source/libs/planner/src/planPhysiCreater.c | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 0830dc4918..3e24e417fc 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -107,6 +107,7 @@ typedef struct SScanLogicNode { bool sortPrimaryKey; bool igLastNull; bool groupOrderScan; + bool onlyMetaCtbIdx; // for tag scan with no tbname } SScanLogicNode; typedef struct SJoinLogicNode { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index a2de0bc63a..4dfc55c0fa 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -660,6 +660,7 @@ static const char* jkScanLogicPlanDynamicScanFuncs = "DynamicScanFuncs"; static const char* jkScanLogicPlanDataRequired = "DataRequired"; static const char* jkScanLogicPlanTagCond = "TagCond"; static const char* jkScanLogicPlanGroupTags = "GroupTags"; +static const char* jkScanLogicPlanOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { const SScanLogicNode* pNode = (const SScanLogicNode*)pObj; @@ -701,7 +702,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkScanLogicPlanGroupTags, pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanLogicPlanOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } return code; } @@ -746,7 +749,10 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkScanLogicPlanGroupTags, &pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanLogicPlanOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 16440be511..6944fc9f18 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2679,6 +2679,9 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } nodesDestroyNode((SNode*)pAgg); tagScanOptCloneAncestorSlimit((SLogicNode*)pScanNode); + + pScanNode->onlyMetaCtbIdx = false; + pCxt->optimized = true; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 8efa9c1048..5f78b5de9c 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -520,7 +520,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla } vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); - pScan->onlyMetaCtbIdx = false; + pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } From 8639c22cc0cd5e684710fe4dfda644a6e0362e54 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 14:53:58 +0800 Subject: [PATCH 116/122] vnode, common: USE_COS def --- source/common/CMakeLists.txt | 8 ++++++++ source/dnode/vnode/CMakeLists.txt | 4 ++++ 2 files 
From 8639c22cc0cd5e684710fe4dfda644a6e0362e54 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 15 Aug 2023 14:53:58 +0800
Subject: [PATCH 116/122] vnode, common: USE_COS def

---
 source/common/CMakeLists.txt      | 8 ++++++++
 source/dnode/vnode/CMakeLists.txt | 4 ++++
 2 files changed, 12 insertions(+)

diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt
index 356ea2be1c..b010467f20 100644
--- a/source/common/CMakeLists.txt
+++ b/source/common/CMakeLists.txt
@@ -16,6 +16,14 @@ ENDIF ()
 IF (TD_STORAGE)
     ADD_DEFINITIONS(-D_STORAGE)
     TARGET_LINK_LIBRARIES(common PRIVATE storage)
+
+    IF(${TD_LINUX})
+        IF(${BUILD_WITH_COS})
+            add_definitions(-DUSE_COS)
+        ENDIF(${BUILD_WITH_COS})
+
+    ENDIF(${TD_LINUX})
+
 ENDIF ()

 target_include_directories(
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index 052b6be37f..c70df86e20 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -189,6 +189,10 @@ target_include_directories(
     PUBLIC "$ENV{HOME}/.cos-local.1/include"
 )

+if(${BUILD_WITH_COS})
+    add_definitions(-DUSE_COS)
+endif(${BUILD_WITH_COS})
+
 endif(${TD_LINUX})

 IF (TD_GRANT)
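The CMake change above only defines the USE_COS macro on Linux builds with COS enabled; the C sources then select between the real COS-backed implementation and no-op stubs. A sketch of that compile-time split, where the guard layout is an assumption based on the stub implementations visible in patch 117 below:

    /* The real/stub split in vnodeCos.c is larger; this layout is assumed. */
    #ifdef USE_COS
    int32_t s3PutObjectFromFile(const char *file, const char *object) {
      /* real upload through the COS SDK goes here */
      return 0;
    }
    #else
    /* the stub keeps non-COS builds linking and running */
    int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
    #endif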
From bd758e0269786711de840303a921b93b4e097d2c Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 15 Aug 2023 15:14:34 +0800
Subject: [PATCH 117/122] retention: remove old files last

---
 source/dnode/vnode/src/inc/vndCos.h         |  2 +-
 source/dnode/vnode/src/tsdb/tsdbRetention.c | 23 +++++++++++----------
 source/dnode/vnode/src/vnd/vnodeCos.c       | 11 ++++++++--
 3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h
index f6db7f096e..cf2c5eb441 100644
--- a/source/dnode/vnode/src/inc/vndCos.h
+++ b/source/dnode/vnode/src/inc/vndCos.h
@@ -26,7 +26,7 @@ extern int8_t tsS3Enabled;

 int32_t s3Init();
 void    s3CleanUp();
-void    s3PutObjectFromFile(const char *file, const char *object);
+int32_t s3PutObjectFromFile(const char *file, const char *object);
 void    s3DeleteObjects(const char *object_name[], int nobject);
 bool    s3Exists(const char *object_name);
 bool    s3Get(const char *object_name, const char *path);
diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c
index ebe20c0e85..46a5d19a1a 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRetention.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c
@@ -114,7 +114,8 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile
   TSDB_CHECK_CODE(code, lino, _exit);

   char *object_name = taosDirEntryBaseName(fname);
-  s3PutObjectFromFile(from->fname, object_name);
+  code = s3PutObjectFromFile(from->fname, object_name);
+  TSDB_CHECK_CODE(code, lino, _exit);

   taosCloseFile(&fdFrom);
@@ -178,16 +179,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const
   int32_t  lino = 0;
   STFileOp op = {0};

-  // remove old
-  op = (STFileOp){
-      .optype = TSDB_FOP_REMOVE,
-      .fid = fobj->f->fid,
-      .of = fobj->f[0],
-  };
-
-  code = TARRAY2_APPEND(rtner->fopArr, op);
-  TSDB_CHECK_CODE(code, lino, _exit);
-
   // create new
   op = (STFileOp){
       .optype = TSDB_FOP_CREATE,
@@ -213,6 +204,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const
   code = tsdbCopyFileS3(rtner, fobj, &op.nf);
   TSDB_CHECK_CODE(code, lino, _exit);

+  // remove old
+  op = (STFileOp){
+      .optype = TSDB_FOP_REMOVE,
+      .fid = fobj->f->fid,
+      .of = fobj->f[0],
+  };
+
+  code = TARRAY2_APPEND(rtner->fopArr, op);
+  TSDB_CHECK_CODE(code, lino, _exit);
+
 _exit:
   if (code) {
     TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
index b28b7ad747..02021831bf 100644
--- a/source/dnode/vnode/src/vnd/vnodeCos.c
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -51,7 +51,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname)
   options->ctl = cos_http_controller_create(options->pool, 0);
 }

-void s3PutObjectFromFile(const char *file_str, const char *object_str) {
+int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
+  int32_t             code = 0;
   cos_pool_t         *p = NULL;
   int                 is_cname = 0;
   cos_status_t       *s = NULL;
@@ -76,6 +77,12 @@ void s3PutObjectFromFile(const char *file_str, const char *object_str) {
   log_status(s);

   cos_pool_destroy(p);
+
+  if (s->code != 200) {
+    return code = s->code;
+  }
+
+  return code;
 }

 void s3DeleteObjects(const char *object_name[], int nobject) {
@@ -300,7 +307,7 @@ long s3Size(const char *object_name) {
 int32_t s3Init() { return 0; }
 void    s3CleanUp() {}

-void s3PutObjectFromFile(const char *file, const char *object) {}
+int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
 void s3DeleteObjects(const char *object_name[], int nobject) {}
 bool s3Exists(const char *object_name) { return false; }
 bool s3Get(const char *object_name, const char *path) { return false; }
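Changing s3PutObjectFromFile from void to int32_t matters because its caller, tsdbCopyFileS3, follows the code/lino/_exit convention: a failed upload must now abort the migration instead of being swallowed. A standalone sketch of that convention, with CHECK_CODE modeled on TSDB_CHECK_CODE (the exact macro in the tree may differ) and copyStep/removeStep as hypothetical stand-ins:

    #include <stdint.h>

    #define CHECK_CODE(code, lino, label) \
      do {                                \
        if ((code) != 0) {                \
          (lino) = __LINE__;              \
          goto label;                     \
        }                                 \
      } while (0)

    static int32_t copyStep(void) { return 0; }    /* hypothetical steps */
    static int32_t removeStep(void) { return 0; }

    static int32_t migrateOne(void) {
      int32_t code = 0;
      int32_t lino = 0;

      code = copyStep();
      CHECK_CODE(code, lino, _exit);   /* a failed upload stops the migration */

      code = removeStep();             /* the old file is only dropped after the copy */
      CHECK_CODE(code, lino, _exit);

    _exit:
      if (code) {
        (void)lino;  /* in the real code: TSDB_ERROR_LOG(..., lino, code) */
      }
      return code;
    }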
From c4e5cfd2fbb0d2a85d141ee57d6e03b095c31061 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 15 Aug 2023 15:40:40 +0800
Subject: [PATCH 118/122] retention: return code from copy

---
 source/dnode/vnode/src/tsdb/tsdbRetention.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c
index 46a5d19a1a..267e8b4117 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRetention.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c
@@ -179,6 +179,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const
   int32_t  lino = 0;
   STFileOp op = {0};

+  // remove old
+  op = (STFileOp){
+      .optype = TSDB_FOP_REMOVE,
+      .fid = fobj->f->fid,
+      .of = fobj->f[0],
+  };
+
+  code = TARRAY2_APPEND(rtner->fopArr, op);
+  TSDB_CHECK_CODE(code, lino, _exit);
+
   // create new
   op = (STFileOp){
       .optype = TSDB_FOP_CREATE,
@@ -204,16 +214,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const
   code = tsdbCopyFileS3(rtner, fobj, &op.nf);
   TSDB_CHECK_CODE(code, lino, _exit);

-  // remove old
-  op = (STFileOp){
-      .optype = TSDB_FOP_REMOVE,
-      .fid = fobj->f->fid,
-      .of = fobj->f[0],
-  };
-
-  code = TARRAY2_APPEND(rtner->fopArr, op);
-  TSDB_CHECK_CODE(code, lino, _exit);
-
 _exit:
   if (code) {
     TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
From 84e472ad03b71aabd88670184d1de52c9b85dd3e Mon Sep 17 00:00:00 2001
From: shenglian zhou
Date: Tue, 15 Aug 2023 16:10:54 +0800
Subject: [PATCH 119/122] enhance: tag cond col list only once and tag scan
 derive from scan

---
 include/libs/nodes/plannodes.h          | 10 +----
 source/libs/command/src/explain.c       | 16 +++----
 source/libs/executor/inc/executorInt.h  |  7 +++
 source/libs/executor/src/scanoperator.c | 36 +++++++--------
 source/libs/nodes/src/nodesCloneFuncs.c |  9 +---
 source/libs/nodes/src/nodesCodeFuncs.c  | 48 ++------------------
 source/libs/nodes/src/nodesMsgFuncs.c   | 58 +++----------------------
 7 files changed, 41 insertions(+), 143 deletions(-)

diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 3e24e417fc..4529520ace 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -336,15 +336,7 @@ typedef struct SScanPhysiNode {
 } SScanPhysiNode;

 typedef struct STagScanPhysiNode {
-  // SScanPhysiNode scan; //TODO?
-  SPhysiNode node;
-  SNodeList* pScanCols;
-  SNodeList* pScanPseudoCols;
-  uint64_t   uid;  // unique id of the table
-  uint64_t   suid;
-  int8_t     tableType;
-  SName      tableName;
-  bool       groupOrderScan;
+  SScanPhysiNode scan;
   bool onlyMetaCtbIdx;  //no tbname, tag index not used.
 } STagScanPhysiNode;

diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index e917de33dd..e167b31ef8 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -291,17 +291,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
   switch (pNode->type) {
     case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: {
       STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode;
-      EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname);
+      EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->scan.tableName.tname);
       EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
       if (pResNode->pExecInfo) {
         QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
         EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
       }
-      if (pTagScanNode->pScanPseudoCols) {
-        EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length);
+      if (pTagScanNode->scan.pScanPseudoCols) {
+        EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->scan.pScanPseudoCols->length);
         EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
       }
-      EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize);
+      EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->totalRowSize);
       EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
       EXPLAIN_ROW_END();
       QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@@ -309,11 +309,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
       if (verbose) {
         EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
         EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
-                           nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots));
+                           nodesGetOutputNumFromSlotList(pTagScanNode->scan.node.pOutputDataBlockDesc->pSlots));
         EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
-        EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize);
-        EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit);
-        EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit);
+        EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->outputRowSize);
+        EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->scan.node.pLimit);
+        EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->scan.node.pSlimit);
         EXPLAIN_ROW_END();
         QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index cb066d809c..dad15dc6bc 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -251,6 +251,12 @@ typedef struct STableMergeScanInfo {
   SSortExecInfo sortExecInfo;
 } STableMergeScanInfo;

+typedef struct STagScanFilterContext {
+  SHashObj* colHash;
+  int32_t   index;
+  SArray*   cInfoList;
+} STagScanFilterContext;
+
 typedef struct STagScanInfo {
   SColumnInfo* pCols;
   SSDataBlock* pRes;
@@ -263,6 +269,7 @@ typedef struct STagScanInfo {
   void*        pCtbCursor;
   SNode*       pTagCond;
   SNode*       pTagIndexCond;
+  STagScanFilterContext filterCtx;
   SArray*      aUidTags;     // SArray<STUidTagInfo>
   SArray*      aFilterIdxs;  // SArray<int32_t>
   SStorageAPI* pStorageAPI;
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 71352b1c6e..ef28875be4 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -2719,12 +2719,6 @@ static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SSca
   return TSDB_CODE_SUCCESS;
 }

-typedef struct STagScanFilterContext {
-  SHashObj* colHash;
-  int32_t   index;
-  SArray*   cInfoList;
-} STagScanFilterContext;
-
 static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) {
   SColumnNode* pSColumnNode = NULL;
   if (QUERY_NODE_COLUMN == nodeType((*pNode))) {
@@ -2767,17 +2761,11 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) {
 }

-static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI) {
+static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) {
   int32_t code = 0;
   int32_t numOfTables = taosArrayGetSize(aUidTags);

-  STagScanFilterContext ctx = {0};
-  ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
-  ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
-
-  nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx);
-
-  SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI);
+  SSDataBlock* pResBlock = createTagValBlockForFilter(pInfo->filterCtx.cInfoList, numOfTables, aUidTags, pVnode, pAPI);

   SArray* pBlockList = taosArrayInit(1, POINTER_BYTES);
   taosArrayPush(pBlockList, &pResBlock);
@@ -2801,8 +2789,7 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF
   blockDataDestroy(pResBlock);
   taosArrayDestroy(pBlockList);

-  taosHashCleanup(ctx.colHash);
-  taosArrayDestroy(ctx.cInfoList);
+
 }

 static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) {
@@ -2911,7 +2898,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) {
     bool ignoreFilterIdx = true;
     if (pInfo->pTagCond != NULL) {
       ignoreFilterIdx = false;
-      tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI);
+      tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo);
     } else {
       ignoreFilterIdx = true;
     }
@@ -2991,7 +2978,8 @@ static void destroyTagScanOperatorInfo(void* param) {
   if (pInfo->pCtbCursor != NULL) {
     pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1);
   }
-
+  taosHashCleanup(pInfo->filterCtx.colHash);
+  taosArrayDestroy(pInfo->filterCtx.cInfoList);
   taosArrayDestroy(pInfo->aFilterIdxs);
   taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag);
@@ -3001,8 +2989,9 @@ static void destroyTagScanOperatorInfo(void* param) {
   taosMemoryFreeClear(param);
 }

-SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode,
+SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode,
                                          STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) {
+  SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode;
   STagScanInfo*  pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo));
   SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
@@ -3040,11 +3029,16 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
   initResultSizeInfo(&pOperator->resultInfo, 4096);
   blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);

-  if (pPhyNode->onlyMetaCtbIdx) {
+  if (pTagScanNode->onlyMetaCtbIdx) {
     pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo));
     pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t));
+    pInfo->filterCtx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
+    pInfo->filterCtx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
+    if (pInfo->pTagCond != NULL) {
+      nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&pInfo->filterCtx);
+    }
   }

-  __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry;
+  __optr_fn_t tagScanNextFn = (pTagScanNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry;
   pOperator->fpSet =
       createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL);
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 965af41fa7..d3cbaac5e1 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -564,14 +564,7 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) {
 }

 static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) {
-  COPY_BASE_OBJECT_FIELD(node, physiNodeCopy);
-  CLONE_NODE_LIST_FIELD(pScanCols);
-  CLONE_NODE_LIST_FIELD(pScanPseudoCols);
-  COPY_SCALAR_FIELD(uid);
-  COPY_SCALAR_FIELD(suid);
-  COPY_SCALAR_FIELD(tableType);
-  COPY_OBJECT_FIELD(tableName, sizeof(SName));
-  COPY_SCALAR_FIELD(groupOrderScan);
+  COPY_BASE_OBJECT_FIELD(scan, physiScanCopy);
   COPY_SCALAR_FIELD(onlyMetaCtbIdx);
   return TSDB_CODE_SUCCESS;
 }
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 4dfc55c0fa..64a4e0e7d3 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -1630,28 +1630,8 @@ static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx";

 static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) {
   const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj;

-  int32_t code = physicPlanNodeToJson(pObj, pJson);
-  if (TSDB_CODE_SUCCESS == code) {
-    code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan);
-  }
+  int32_t code = physiScanNodeToJson(pObj, pJson);
+
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx);
   }
@@ -1661,28 +1641,8 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) {
 static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) {
   STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj;

-  int32_t code = jsonToPhysicPlanNode(pJson, pObj);
-  if (TSDB_CODE_SUCCESS == code) {
-    code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan);
-  }
+  int32_t code = jsonToPhysiScanNode(pObj, pJson);
+
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx);
   }
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
index 4d1120861d..cade77fc17 100644
--- a/source/libs/nodes/src/nodesMsgFuncs.c
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -2004,42 +2004,15 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) {
 }

 enum {
-  PHY_TAG_SCAN_CODE_BASE_NODE = 1,
-  PHY_TAG_SCAN_CODE_SCAN_COLS,
-  PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS,
-  PHY_TAG_SCAN_CODE_BASE_UID,
-  PHY_TAG_SCAN_CODE_BASE_SUID,
-  PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE,
-  PHY_TAG_SCAN_CODE_BASE_TABLE_NAME,
-  PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN,
+  PHY_TAG_SCAN_CODE_SCAN = 1,
   PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX
 };

 static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
   const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj;

-  int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName);
-  }
-  if (TSDB_CODE_SUCCESS == code) {
-    code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan);
-  }
+  int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan);
+
   if (TSDB_CODE_SUCCESS == code) {
     code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx);
   }
@@ -2053,29 +2026,8 @@ static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) {
   STlv*   pTlv = NULL;
   tlvForEach(pDecoder, pTlv, code) {
     switch (pTlv->type) {
-      case PHY_TAG_SCAN_CODE_BASE_NODE:
-        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
-        break;
-      case PHY_TAG_SCAN_CODE_SCAN_COLS:
-        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols);
-        break;
-      case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS:
-        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols);
-        break;
-      case PHY_TAG_SCAN_CODE_BASE_UID:
-        code = tlvDecodeU64(pTlv, &pNode->uid);
-        break;
-      case PHY_TAG_SCAN_CODE_BASE_SUID:
-        code = tlvDecodeU64(pTlv, &pNode->suid);
-        break;
-      case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE:
-        code = tlvDecodeI8(pTlv, &pNode->tableType);
-        break;
-      case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME:
-        code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName);
-        break;
-      case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN:
-        code = tlvDecodeBool(pTlv, &pNode->groupOrderScan);
+      case PHY_TAG_SCAN_CODE_SCAN:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
         break;
       case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX:
         code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx);
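The core move in the patch above is that STagScanPhysiNode now embeds SScanPhysiNode as its first member, so all the per-field copy, JSON, and TLV duplication collapses into one call on the base. This leans on the C guarantee that a pointer to a struct also points to its first member, which is why a plain cast upcasts safely. A minimal self-contained sketch of the idiom, with simplified stand-in types:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-ins for SScanPhysiNode / STagScanPhysiNode. */
    typedef struct SScanBase {
      uint64_t uid;
      int8_t   tableType;
    } SScanBase;

    typedef struct STagScan {
      SScanBase scan;        /* the base must stay the first member */
      bool      onlyMetaCtbIdx;
    } STagScan;

    static uint64_t scanGetUid(const SScanBase* pScan) { return pScan->uid; }

    int useIt(void) {
      STagScan tagScan = {.scan = {.uid = 42, .tableType = 1}, .onlyMetaCtbIdx = true};
      /* the upcast is a plain pointer cast, exactly like
       * (SScanPhysiNode*)pTagScanNode in patch 122 below */
      return (int)scanGetUid((const SScanBase*)&tagScan);
    }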
From 1792bf306e9be53779425a59b60af7ef43242d96 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 15 Aug 2023 16:54:26 +0800
Subject: [PATCH 120/122] vnode: fix cos cache evicting

---
 source/dnode/vnode/src/vnd/vnodeCos.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
index 02021831bf..4c76538eb2 100644
--- a/source/dnode/vnode/src/vnd/vnodeCos.c
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -212,19 +212,20 @@ static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) {

 void s3EvictCache(const char *path, long object_size) {
   SDiskSize disk_size = {0};
-  if (taosGetDiskSize((char *)path, &disk_size) < 0) {
+  char      dir_name[TSDB_FILENAME_LEN] = "\0";
+
+  tstrncpy(dir_name, path, TSDB_FILENAME_LEN);
+  taosDirName(dir_name);
+
+  if (taosGetDiskSize((char *)dir_name, &disk_size) < 0) {
     terrno = TAOS_SYSTEM_ERROR(errno);
     vError("failed to get disk:%s size since %s", path, terrstr());
     return;
   }

-  if (object_size >= disk_size.avail + 1 << 30) {
+  if (object_size >= disk_size.avail - (1 << 30)) {
     // evict too old files
     // 1, list data files' atime under dir(path)
-    char dir_name[TSDB_FILENAME_LEN] = "\0";
-    tstrncpy(dir_name, path, TSDB_FILENAME_LEN);
-    taosDirName(dir_name);
-
     tdbDirPtr pDir = taosOpenDir(dir_name);
     if (pDir == NULL) {
       terrno = TAOS_SYSTEM_ERROR(errno);
@@ -236,9 +237,14 @@ void s3EvictCache(const char *path, long object_size) {
       char *name = taosGetDirEntryName(pDirEntry);
       if (!strncmp(name + strlen(name) - 5, ".data", 5)) {
         SEvictFile e_file = {0};
+        char       entry_name[TSDB_FILENAME_LEN] = "\0";
+        int        dir_len = strlen(dir_name);

-        tstrncpy(e_file.name, name, TSDB_FILENAME_LEN);
-        taosStatFile(name, &e_file.size, NULL, &e_file.atime);
+        memcpy(e_file.name, dir_name, dir_len);
+        e_file.name[dir_len] = '/';
+        memcpy(e_file.name + dir_len + 1, name, strlen(name));
+
+        taosStatFile(e_file.name, &e_file.size, NULL, &e_file.atime);

         taosArrayPush(evict_files, &e_file);
       }
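The eviction guard fixed above is a classic precedence bug: in C, `+` binds tighter than `<<`, so `disk_size.avail + 1 << 30` parses as `(disk_size.avail + 1) << 30`, an enormous threshold, rather than "avail plus 1 GiB"; the fix also flips the arithmetic so eviction triggers when the object would eat into 1 GiB of headroom. A standalone illustration (assumes a 64-bit long, as on LP64 Linux):

    #include <stdio.h>

    int main(void) {
      long avail = 4L << 30;  /* pretend 4 GiB are free */

      /* old guard: parses as (avail + 1) << 30, so it almost never fires */
      printf("old threshold: %ld\n", avail + 1 << 30);

      /* fixed guard: evict once the object would break into 1 GiB of headroom */
      printf("new threshold: %ld\n", avail - (1L << 30));
      return 0;
    }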
From 450d7e2d3c6cdebf2dc6fbf2a279666e45efd576 Mon Sep 17 00:00:00 2001
From: slzhou
Date: Tue, 15 Aug 2023 16:56:15 +0800
Subject: [PATCH 121/122] enhance: compilation error

---
 source/libs/nodes/src/nodesCodeFuncs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 64a4e0e7d3..48c9bf33dd 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -1641,7 +1641,7 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) {
 static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) {
   STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj;

-  int32_t code = jsonToPhysiScanNode(pObj, pJson);
+  int32_t code = jsonToPhysiScanNode(pJson, pObj);

   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx);
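The swapped arguments fixed above could slip into patch 119 so easily because the decoder signature is (const SJson*, void*): a void* argument converts implicitly to any object pointer in C, so passing pObj in the pJson slot raises no error on its own. What the compiler did catch, consistent with the "compilation error" subject, is the const SJson* landing in the void* slot, which discards const and is rejected or at least diagnosed. A sketch of the asymmetry, with decodeNode as a hypothetical stand-in:

    #include <stdint.h>

    typedef struct SJson SJson;   /* opaque, as in the nodes code */

    static int32_t decodeNode(const SJson* pJson, void* pObj) {
      (void)pJson;
      (void)pObj;
      return 0;
    }

    static int32_t caller(const SJson* pJson, void* pObj) {
      /* decodeNode(pObj, pJson);
       *   pObj (void*)         -> const SJson* : converts silently in C
       *   pJson (const SJson*) -> void*        : drops const, diagnosed */
      return decodeNode(pJson, pObj);  /* corrected argument order */
    }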
From 49e4b11547c03cfd7ed5dc993d2fa6e42bab9a8b Mon Sep 17 00:00:00 2001
From: slzhou
Date: Tue, 15 Aug 2023 17:00:53 +0800
Subject: [PATCH 122/122] fix: fix compilation error

---
 source/libs/executor/src/scanoperator.c   | 2 +-
 source/libs/nodes/test/nodesCloneTest.cpp | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index ef28875be4..d7d97cc514 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -2991,7 +2991,7 @@ static void destroyTagScanOperatorInfo(void* param) {
 SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode,
                                          STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) {
-  SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode;
+  SScanPhysiNode* pPhyNode = (SScanPhysiNode*)pTagScanNode;
   STagScanInfo*  pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo));
   SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
diff --git a/source/libs/nodes/test/nodesCloneTest.cpp b/source/libs/nodes/test/nodesCloneTest.cpp
index e1e99abab3..8b8893d317 100644
--- a/source/libs/nodes/test/nodesCloneTest.cpp
+++ b/source/libs/nodes/test/nodesCloneTest.cpp
@@ -199,9 +199,10 @@ TEST_F(NodesCloneTest, physiScan) {
     ASSERT_EQ(nodeType(pSrc), nodeType(pDst));
     STagScanPhysiNode* pSrcNode = (STagScanPhysiNode*)pSrc;
     STagScanPhysiNode* pDstNode = (STagScanPhysiNode*)pDst;
-    ASSERT_EQ(pSrcNode->uid, pDstNode->uid);
-    ASSERT_EQ(pSrcNode->suid, pDstNode->suid);
-    ASSERT_EQ(pSrcNode->tableType, pDstNode->tableType);
+    ASSERT_EQ(pSrcNode->scan.uid, pDstNode->scan.uid);
+    ASSERT_EQ(pSrcNode->scan.suid, pDstNode->scan.suid);
+    ASSERT_EQ(pSrcNode->scan.tableType, pDstNode->scan.tableType);
+    ASSERT_EQ(pSrcNode->onlyMetaCtbIdx, pDstNode->onlyMetaCtbIdx);
   });

   std::unique_ptr srcNode(nullptr, nodesDestroyNode);