From 0d73ed62b2fd4dabc64a75a7a87fc0b391af8a9f Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 23 Aug 2022 17:05:19 +0800 Subject: [PATCH 01/79] fix(query): change mnd redo action sleep TD-18483 --- source/dnode/mnode/impl/src/mndTrans.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 17b4336465..06a95cfc93 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1312,7 +1312,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { if (pTrans->failedTimes < 6) { mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id, pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes); - taosMsleep(1000); + taosMsleep(100); continueExec = true; return true; } From 2944ca63af8883d8ac701bb056f2222d710c483f Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 23 Aug 2022 18:07:17 +0800 Subject: [PATCH 02/79] build: cmake warning error --- cmake/cmake.define | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 989b69a89b..f38fac17a1 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -2,8 +2,6 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE OFF) -SET(BUILD_SHARED_LIBS "OFF") - #set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin) @@ -103,6 +101,9 @@ IF (TD_WINDOWS) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}") ELSE () + IF (${TD_DARWIN}) + set(CMAKE_MACOSX_RPATH 0) + ELSE () IF (${COVER} MATCHES "true") MESSAGE(STATUS "Test coverage mode, add extra flags") SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage") From 0ebd3281c504013a92132e276fb85efd4cddf621 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 23 Aug 2022 18:26:27 +0800 Subject: [PATCH 03/79] enh: update table meta after creating table --- include/common/tmsg.h | 48 +++++++++----- source/client/inc/clientInt.h | 5 +- source/client/src/clientImpl.c | 35 ++++++---- source/client/src/clientMsgHandler.c | 27 +++++++- source/common/src/tmsg.c | 87 +++++++++++++++++++++++-- source/dnode/mnode/impl/inc/mndStb.h | 1 + source/dnode/mnode/impl/src/mndStb.c | 61 +++++++++++++++++ source/dnode/mnode/impl/src/mndTrans.c | 25 ++++--- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaTable.c | 17 ++++- source/dnode/vnode/src/vnd/vnodeSvr.c | 11 +++- source/libs/qcom/src/queryUtil.c | 5 ++ source/libs/scheduler/src/schRemote.c | 15 ++++- 13 files changed, 285 insertions(+), 54 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 8f199c72f7..ae6f034df5 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -441,6 +441,25 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW STSchema* tdGetSTSChemaFromSSChema(SSchema* pSchema, int32_t nCols, int32_t sver); + +typedef struct { + char tbName[TSDB_TABLE_NAME_LEN]; + char stbName[TSDB_TABLE_NAME_LEN]; + char dbFName[TSDB_DB_FNAME_LEN]; + int64_t dbId; + int32_t numOfTags; + int32_t numOfColumns; + int8_t precision; + int8_t tableType; + int32_t sversion; + int32_t tversion; + uint64_t suid; + uint64_t tuid; + int32_t vgId; + SSchema* pSchemas; +} STableMetaRsp; + + typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t igExists; @@ -472,6 +491,14 @@ int32_t 
tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq int32_t tDeserializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq); void tFreeSMCreateStbReq(SMCreateStbReq* pReq); +typedef struct { + STableMetaRsp* pMeta; +} SMCreateStbRsp; + +int32_t tEncodeSMCreateStbRsp(SEncoder* pEncoder, const SMCreateStbRsp* pRsp); +int32_t tDecodeSMCreateStbRsp(SDecoder* pDecoder, SMCreateStbRsp* pRsp); +void tFreeSMCreateStbRsp(SMCreateStbRsp* pRsp); + typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t igNotExists; @@ -1239,23 +1266,6 @@ typedef struct { SVgroupInfo vgroups[]; } SVgroupsInfo; -typedef struct { - char tbName[TSDB_TABLE_NAME_LEN]; - char stbName[TSDB_TABLE_NAME_LEN]; - char dbFName[TSDB_DB_FNAME_LEN]; - int64_t dbId; - int32_t numOfTags; - int32_t numOfColumns; - int8_t precision; - int8_t tableType; - int32_t sversion; - int32_t tversion; - uint64_t suid; - uint64_t tuid; - int32_t vgId; - SSchema* pSchemas; -} STableMetaRsp; - typedef struct { STableMetaRsp* pMeta; } SMAlterStbRsp; @@ -2028,11 +2038,13 @@ int tEncodeSVCreateTbBatchReq(SEncoder* pCoder, const SVCreateTbBatchReq* pReq); int tDecodeSVCreateTbBatchReq(SDecoder* pCoder, SVCreateTbBatchReq* pReq); typedef struct { - int32_t code; + int32_t code; + STableMetaRsp* pMeta; } SVCreateTbRsp, SVUpdateTbRsp; int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp); int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp); +void tFreeSVCreateTbRsp(void* param); int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq); void* tDeserializeSVCreateTbReq(void* buf, SVCreateTbReq* pReq); diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index f275ae0885..de91703f82 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -363,8 +363,9 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest); int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList); void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta); -int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clientImpl.c and become a static function -int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx +int32_t removeMeta(STscObj* pTscObj, SArray* tbList); +int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); +int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog); bool qnodeRequired(SRequestObj* pRequest); #ifdef __cplusplus diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 5f0af55d13..df3e82a05e 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -780,6 +780,10 @@ int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) { return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res); } +int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) { + return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res); +} + int32_t handleQueryExecRsp(SRequestObj* pRequest) { if (NULL == pRequest->body.resInfo.execRes.res) { return TSDB_CODE_SUCCESS; @@ -802,6 +806,19 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) { code = handleAlterTbExecRes(pRes->res, pCatalog); break; } + case TDMT_VND_CREATE_TABLE: { + SArray* pList = (SArray*)pRes->res; + int32_t num = taosArrayGetSize(pList); + for (int32_t i = 0; i < num; ++i) { + void* res = taosArrayGetP(pList, i); + code = handleCreateTbExecRes(res, pCatalog); + } + break; + } + case TDMT_MND_CREATE_STB: { 
+ code = handleCreateTbExecRes(pRes->res, pCatalog); + break; + } case TDMT_VND_SUBMIT: { atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes); @@ -859,17 +876,13 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { return; } - if (code == TSDB_CODE_SUCCESS) { - code = handleQueryExecRsp(pRequest); - ASSERT(pRequest->code == TSDB_CODE_SUCCESS); - pRequest->code = code; - } - tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type)); - if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { + if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) { removeMeta(pTscObj, pRequest->targetTableList); } + handleQueryExecRsp(pRequest); + // return to client pRequest->body.queryFp(pRequest->body.param, pRequest, code); } @@ -930,6 +943,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue qDestroyQuery(pQuery); } + if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) { + removeMeta(pRequest->pTscObj, pRequest->targetTableList); + } + handleQueryExecRsp(pRequest); if (NULL != pRequest && TSDB_CODE_SUCCESS != code) { @@ -1127,10 +1144,6 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida inRetry = true; } while (retryNum++ < REQUEST_TOTAL_EXEC_TIMES); - if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { - removeMeta(pRequest->pTscObj, pRequest->targetTableList); - } - return pRequest; } diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 0c4cf23c4e..68aeb68ee0 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -232,13 +232,36 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) { assert(pMsg != NULL && param != NULL); SRequestObj* pRequest = param; - taosMemoryFree(pMsg->pData); if (code != TSDB_CODE_SUCCESS) { setErrno(pRequest, code); + } else { + SMCreateStbRsp createRsp = {0}; + SDecoder coder = {0}; + tDecoderInit(&coder, pMsg->pData, pMsg->len); + tDecodeSMCreateStbRsp(&coder, &createRsp); + tDecoderClear(&coder); + + pRequest->body.resInfo.execRes.msgType = TDMT_MND_CREATE_STB; + pRequest->body.resInfo.execRes.res = createRsp.pMeta; } + taosMemoryFree(pMsg->pData); + if (pRequest->body.queryFp != NULL) { - removeMeta(pRequest->pTscObj, pRequest->tableList); + SExecResult* pRes = &pRequest->body.resInfo.execRes; + + if (code == TSDB_CODE_SUCCESS) { + SCatalog* pCatalog = NULL; + int32_t ret = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + if (pRes->res != NULL) { + ret = handleCreateTbExecRes(pRes->res, pCatalog); + } + + if (ret != TSDB_CODE_SUCCESS) { + code = ret; + } + } + pRequest->body.queryFp(pRequest->body.param, pRequest, code); } else { tsem_post(&pRequest->body.rspSem); diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 533d924546..3ceb9ca192 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3196,12 +3196,16 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) { if (tDecodeI32(pDecoder, &pRsp->vgId) < 0) return -1; int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns; - pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols); - if (pRsp->pSchemas == NULL) return -1; + if (totalCols > 0) { + pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols); + if (pRsp->pSchemas == NULL) return -1; - for (int32_t i = 0; i < totalCols; ++i) { - SSchema *pSchema = 
&pRsp->pSchemas[i]; - if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1; + for (int32_t i = 0; i < totalCols; ++i) { + SSchema *pSchema = &pRsp->pSchemas[i]; + if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1; + } + } else { + pRsp->pSchemas = NULL; } return 0; @@ -5090,6 +5094,10 @@ int tEncodeSVCreateTbRsp(SEncoder *pCoder, const SVCreateTbRsp *pRsp) { if (tStartEncode(pCoder) < 0) return -1; if (tEncodeI32(pCoder, pRsp->code) < 0) return -1; + if (tEncodeI32(pCoder, pRsp->pMeta ? 1 : 0) < 0) return -1; + if (pRsp->pMeta) { + if (tEncodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1; + } tEndEncode(pCoder); return 0; @@ -5100,10 +5108,25 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) { if (tDecodeI32(pCoder, &pRsp->code) < 0) return -1; + int32_t meta = 0; + if (tDecodeI32(pCoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1; + } else { + pRsp->pMeta = NULL; + } + tEndDecode(pCoder); return 0; } +void tFreeSVCreateTbRsp(void* param) { + SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param; + taosMemoryFree(pRsp->pMeta); +} + // TDMT_VND_DROP_TABLE ================= static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) { if (tStartEncode(pCoder) < 0) return -1; @@ -5558,6 +5581,60 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) { } } + +int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1; + if (pRsp->pMeta->pSchemas) { + if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1; + } + tEndEncode(pEncoder); + return 0; +} + +int32_t tDecodeSMCreateStbRsp(SDecoder *pDecoder, SMCreateStbRsp *pRsp) { + int32_t meta = 0; + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI32(pDecoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(pDecoder); + return 0; +} + +int32_t tDeserializeSMCreateStbRsp(void *buf, int32_t bufLen, SMCreateStbRsp *pRsp) { + int32_t meta = 0; + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeI32(&decoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(&decoder); + tDecoderClear(&decoder); + return 0; +} + +void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) { + if (NULL == pRsp) { + return; + } + + if (pRsp->pMeta) { + taosMemoryFree(pRsp->pMeta->pSchemas); + taosMemoryFree(pRsp->pMeta); + } +} + + + int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) { if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1; if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) { diff --git a/source/dnode/mnode/impl/inc/mndStb.h b/source/dnode/mnode/impl/inc/mndStb.h index 010199a89f..8f0d55e100 100644 --- a/source/dnode/mnode/impl/inc/mndStb.h +++ b/source/dnode/mnode/impl/inc/mndStb.h @@ -35,6 +35,7 @@ SDbObj *mndAcquireDbByStb(SMnode *pMnode, const char *stbName); int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreate, SDbObj *pDb); int32_t mndAddStbToTrans(SMnode *pMnode, STrans 
*pTrans, SDbObj *pDb, SStbObj *pStb); void mndFreeStb(SStbObj *pStb); +int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen); void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst); void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index ebec3d5ea6..b29271e455 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -1774,6 +1774,67 @@ static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, SStbObj *pObj, void **pCont, i return 0; } +int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen) { + int32_t ret = -1; + SDbObj *pDb = mndAcquireDb(pMnode, dbFName); + if (NULL == pDb) { + return -1; + } + + SStbObj *pObj = mndAcquireStb(pMnode, stbFName); + if (NULL == pObj) { + goto _OVER; + } + + SEncoder ec = {0}; + uint32_t contLen = 0; + SMCreateStbRsp stbRsp = {0}; + SName name = {0}; + tNameFromString(&name, pObj->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + + stbRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == stbRsp.pMeta) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + ret = mndBuildStbSchemaImp(pDb, pObj, name.tname, stbRsp.pMeta); + if (ret) { + tFreeSMCreateStbRsp(&stbRsp); + goto _OVER; + } + + tEncodeSize(tEncodeSMCreateStbRsp, &stbRsp, contLen, ret); + if (ret) { + tFreeSMCreateStbRsp(&stbRsp); + goto _OVER; + } + + void *cont = taosMemoryMalloc(contLen); + tEncoderInit(&ec, cont, contLen); + tEncodeSMCreateStbRsp(&ec, &stbRsp); + tEncoderClear(&ec); + + tFreeSMCreateStbRsp(&stbRsp); + + *pCont = cont; + *pLen = contLen; + + ret = 0; + +_OVER: + if (pObj) { + mndReleaseStb(pMnode, pObj); + } + + if (pDb) { + mndReleaseDb(pMnode, pDb); + } + + return ret; +} + + static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, void *alterOriData, int32_t alterOriDataLen) { int32_t code = -1; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 17b4336465..e610fa6d27 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -17,6 +17,7 @@ #include "mndTrans.h" #include "mndConsumer.h" #include "mndDb.h" +#include "mndStb.h" #include "mndPrivilege.h" #include "mndShow.h" #include "mndSync.h" @@ -900,15 +901,6 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } SRpcMsg rspMsg = {.code = code, .info = *pInfo}; - if (pTrans->rpcRspLen != 0) { - void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); - if (rpcCont != NULL) { - memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); - rspMsg.pCont = rpcCont; - rspMsg.contLen = pTrans->rpcRspLen; - } - } - if (pTrans->originRpcType == TDMT_MND_CREATE_DB) { mDebug("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType)); SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname1); @@ -924,6 +916,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } } mndReleaseDb(pMnode, pDb); + } else if (pTrans->originRpcType == TDMT_MND_CREATE_STB) { + void *pCont = NULL; + int32_t contLen = 0; + if (0 == mndBuildSMCreateStbRsp(pMnode, pTrans->dbname1, pTrans->dbname2, &pCont, &contLen) != 0) { + mndTransSetRpcRsp(pTrans, pCont, contLen); + } + } + + if (pTrans->rpcRspLen != 0) { + void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); + if (rpcCont != NULL) { + memcpy(rpcCont, 
pTrans->rpcRsp, pTrans->rpcRspLen); + rspMsg.pCont = rpcCont; + rspMsg.contLen = pTrans->rpcRspLen; + } } tmsgSendRsp(&rspMsg); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 39c5f3873e..927c314a4c 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -102,7 +102,7 @@ int metaCommit(SMeta* pMeta); int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList); -int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq); +int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp **pMetaRsp); int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids); int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids); int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index aa107ab253..6b18e1b48d 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -367,7 +367,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { return 0; } -int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { +int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMetaRsp **pMetaRsp) { SMetaEntry me = {0}; SMetaReader mr = {0}; @@ -427,6 +427,21 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { if (metaHandleEntry(pMeta, &me) < 0) goto _err; + if (pMetaRsp) { + *pMetaRsp = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + + if (*pMetaRsp) { + if (me.type == TSDB_CHILD_TABLE) { + (*pMetaRsp)->tableType = TSDB_CHILD_TABLE; + (*pMetaRsp)->tuid = pReq->uid; + (*pMetaRsp)->suid = pReq->ctb.suid; + strcpy((*pMetaRsp)->tbName, pReq->name); + } else { + metaUpdateMetaRsp(pReq->uid, pReq->name, pReq->ntb.schemaRow, *pMetaRsp); + } + } + } + metaDebug("vgId:%d, table:%s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid, pReq->type); return 0; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 7a8d168f4f..947fb845cd 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -370,6 +370,10 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { } void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) { + if (NULL == pMetaRsp) { + return; + } + strcpy(pMetaRsp->dbFName, pVnode->config.dbname); pMetaRsp->dbId = pVnode->config.dbId; pMetaRsp->vgId = TD_VID(pVnode); @@ -514,7 +518,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR } // do create table - if (metaCreateTable(pVnode->pMeta, version, pCreateReq) < 0) { + if (metaCreateTable(pVnode->pMeta, version, pCreateReq, &cRsp.pMeta) < 0) { if (pCreateReq->flags & TD_CREATE_IF_NOT_EXISTS && terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { cRsp.code = TSDB_CODE_SUCCESS; } else { @@ -524,6 +528,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR cRsp.code = TSDB_CODE_SUCCESS; tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid); taosArrayPush(tbUids, &pCreateReq->uid); + vnodeUpdateMetaRsp(pVnode, cRsp.pMeta); } taosArrayPush(rsp.pArray, &cRsp); @@ 
-552,7 +557,7 @@ _exit: pCreateReq = req.pReqs + iReq; taosArrayDestroy(pCreateReq->ctb.tagName); } - taosArrayDestroy(rsp.pArray); + taosArrayDestroyEx(rsp.pArray, tFreeSVCreateTbRsp); taosArrayDestroy(tbUids); tDecoderClear(&decoder); tEncoderClear(&encoder); @@ -864,7 +869,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq goto _exit; } - if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) { + if (metaCreateTable(pVnode->pMeta, version, &createTbReq, NULL) < 0) { if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { submitBlkRsp.code = terrno; pRsp->code = terrno; diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index 5143aa4af1..3c8b019d81 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -219,6 +219,11 @@ void destroyQueryExecRes(SExecResult* pRes) { } switch (pRes->msgType) { + case TDMT_VND_CREATE_TABLE: { + taosArrayDestroyEx((SArray*)pRes->res, tFreeSTableMetaRsp); + break; + } + case TDMT_MND_CREATE_STB: case TDMT_VND_ALTER_TABLE: case TDMT_MND_ALTER_STB: { tFreeSTableMetaRsp((STableMetaRsp*)pRes->res); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index ecd9daf1bc..bd0c3009b0 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -102,15 +102,26 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa tDecoderInit(&coder, msg, msgSize); code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp); if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { + pJob->execRes.res = taosArrayInit(batchRsp.nRsps, POINTER_BYTES); + pJob->execRes.msgType = TDMT_VND_CREATE_TABLE; + for (int32_t i = 0; i < batchRsp.nRsps; ++i) { SVCreateTbRsp *rsp = batchRsp.pRsps + i; + if (rsp->pMeta) { + taosArrayPush((SArray*)pJob->execRes.res, &rsp->pMeta); + } + if (TSDB_CODE_SUCCESS != rsp->code) { code = rsp->code; - tDecoderClear(&coder); - SCH_ERR_JRET(code); } } + + if (taosArrayGetSize((SArray*)pJob->execRes.res) <= 0) { + taosArrayDestroy((SArray*)pJob->execRes.res); + pJob->execRes.res = NULL; + } } + tDecoderClear(&coder); SCH_ERR_JRET(code); } From 06ac4c96360aeef167148b057a7bcc172a464362 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 23 Aug 2022 19:14:21 +0800 Subject: [PATCH 04/79] Update README.md (#16347) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 02dd9984e8..ef85aa67e0 100644 --- a/README.md +++ b/README.md @@ -256,6 +256,7 @@ After building successfully, TDengine can be installed by: nmake install ``` + ## Quick Run From 941128659558116d6ca8acc9b8b0836ba18716a2 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Wed, 24 Aug 2022 08:49:37 +0800 Subject: [PATCH 05/79] build: cmake warning error --- cmake/cmake.define | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index f38fac17a1..5d64815a9a 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -103,7 +103,7 @@ IF (TD_WINDOWS) ELSE () IF (${TD_DARWIN}) set(CMAKE_MACOSX_RPATH 0) - ELSE () + ENDIF () IF (${COVER} MATCHES "true") MESSAGE(STATUS "Test coverage mode, add extra flags") SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage") From 742411dfcb20eaf7372e93d38ad861bfc9a027e3 Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Wed, 24 Aug 2022 09:00:33 +0800 Subject: [PATCH 06/79] Update README.md --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff 
--git a/README.md b/README.md index ef85aa67e0..2a879ba0fd 100644 --- a/README.md +++ b/README.md @@ -21,17 +21,17 @@ English | [简体中文](README-CN.md) | We are hiring, check [here](https://tde TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages: -- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. +- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. -- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. +- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. -- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds. +- **[Cloud Native**](https://tdengine.com/tdengine/cloud-native-time-series-database/): Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds. -- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. +- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. -- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. +- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. 
-- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide. +- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide. # Documentation @@ -39,9 +39,9 @@ For user manual, system design and architecture, please refer to [TDengine Docum # Building -At the moment, TDengine server supports running on Linux, Windows systems.Any OS application can also choose the RESTful interface of taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU , and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. +At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. -You can choose to install through source code according to your needs, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubernetes](https://docs.taosdata.com/deployment/k8s/) to install. This quick guide only applies to installing from source. +You can choose to install through source code, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubernetes](https://docs.taosdata.com/deployment/k8s/). This quick guide only applies to installing from source. TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. From f63935f27ef0d1a870fae9f454b02b9180a12a10 Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Wed, 24 Aug 2022 09:01:01 +0800 Subject: [PATCH 07/79] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a879ba0fd..a9719df3b2 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ TDengine is an open source, high-performance, cloud native time-series database - **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. -- **[Cloud Native**](https://tdengine.com/tdengine/cloud-native-time-series-database/): Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds. 
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds. - **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. From dc10d983c3d2be9f44dd26cd3d1a10ef4324f7d1 Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Wed, 24 Aug 2022 09:03:21 +0800 Subject: [PATCH 08/79] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a9719df3b2..57b184682e 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ English | [简体中文](README-CN.md) | We are hiring, check [here](https://tde # What is TDengine? -TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages: +TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/what-is-a-time-series-database/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages: - **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. From 01eac71b65c85d2190cc31d7ac149de1ec826b9f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 24 Aug 2022 10:42:37 +0800 Subject: [PATCH 09/79] fix(query): fix the invalid last block check condition. 
--- source/dnode/vnode/src/tsdb/tsdbRead.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index a4738781f5..4a5d8d11f9 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1414,7 +1414,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf int64_t minKey = 0; if (pReader->order == TSDB_ORDER_ASC) { minKey = INT64_MAX; // chosen the minimum value - if (minKey > tsLast && pLastBlockReader->lastBlockData.nRow > 0) { + if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) { minKey = tsLast; } @@ -1427,7 +1427,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf } } else { minKey = INT64_MIN; - if (minKey < tsLast && pLastBlockReader->lastBlockData.nRow > 0) { + if (minKey < tsLast && hasDataInLastBlock(pLastBlockReader)) { minKey = tsLast; } From bd02bfe58dbee05e12fb3e6eb8bbb21be179a349 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 24 Aug 2022 10:57:06 +0800 Subject: [PATCH 10/79] enh: assign specific threads for rsma --- docs/zh/14-reference/12-config/index.md | 239 ++++++++++++------------ include/common/tglobal.h | 1 + source/common/src/tglobal.c | 8 + source/dnode/vnode/src/inc/sma.h | 6 +- source/dnode/vnode/src/inc/vnodeInt.h | 3 - source/dnode/vnode/src/sma/smaEnv.c | 109 +++++++++-- source/dnode/vnode/src/sma/smaOpen.c | 14 -- source/dnode/vnode/src/sma/smaRollup.c | 106 ++--------- source/dnode/vnode/src/vnd/vnodeOpen.c | 3 - source/dnode/vnode/src/vnd/vnodeSvr.c | 6 +- 10 files changed, 244 insertions(+), 251 deletions(-) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index d2efc5baf3..7b31e10572 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -698,122 +698,123 @@ charset 的有效值是 UTF-8。 | 45 | numOfVnodeFetchThreads | 否 | 是 | | 46 | numOfVnodeWriteThreads | 否 | 是 | | 47 | numOfVnodeSyncThreads | 否 | 是 | -| 48 | numOfQnodeQueryThreads | 否 | 是 | -| 49 | numOfQnodeFetchThreads | 否 | 是 | -| 50 | numOfSnodeSharedThreads | 否 | 是 | -| 51 | numOfSnodeUniqueThreads | 否 | 是 | -| 52 | rpcQueueMemoryAllowed | 否 | 是 | -| 53 | logDir | 是 | 是 | -| 54 | minimalLogDirGB | 是 | 是 | -| 55 | numOfLogLines | 是 | 是 | -| 56 | asyncLog | 是 | 是 | -| 57 | logKeepDays | 是 | 是 | -| 58 | debugFlag | 是 | 是 | -| 59 | tmrDebugFlag | 是 | 是 | -| 60 | uDebugFlag | 是 | 是 | -| 61 | rpcDebugFlag | 是 | 是 | -| 62 | jniDebugFlag | 是 | 是 | -| 63 | qDebugFlag | 是 | 是 | -| 64 | cDebugFlag | 是 | 是 | -| 65 | dDebugFlag | 是 | 是 | -| 66 | vDebugFlag | 是 | 是 | -| 67 | mDebugFlag | 是 | 是 | -| 68 | wDebugFlag | 是 | 是 | -| 69 | sDebugFlag | 是 | 是 | -| 70 | tsdbDebugFlag | 是 | 是 | -| 71 | tqDebugFlag | 否 | 是 | -| 72 | fsDebugFlag | 是 | 是 | -| 73 | udfDebugFlag | 否 | 是 | -| 74 | smaDebugFlag | 否 | 是 | -| 75 | idxDebugFlag | 否 | 是 | -| 76 | tdbDebugFlag | 否 | 是 | -| 77 | metaDebugFlag | 否 | 是 | -| 78 | timezone | 是 | 是 | -| 79 | locale | 是 | 是 | -| 80 | charset | 是 | 是 | -| 81 | udf | 是 | 是 | -| 82 | enableCoreFile | 是 | 是 | -| 83 | arbitrator | 是 | 否 | -| 84 | numOfThreadsPerCore | 是 | 否 | -| 85 | numOfMnodes | 是 | 否 | -| 86 | vnodeBak | 是 | 否 | -| 87 | balance | 是 | 否 | -| 88 | balanceInterval | 是 | 否 | -| 89 | offlineThreshold | 是 | 否 | -| 90 | role | 是 | 否 | -| 91 | dnodeNopLoop | 是 | 否 | -| 92 | keepTimeOffset | 是 | 否 | -| 93 | rpcTimer | 是 | 否 | -| 94 | rpcMaxTime | 是 | 否 | -| 95 | rpcForceTcp | 是 | 否 | -| 96 | 
tcpConnTimeout | 是 | 否 | -| 97 | syncCheckInterval | 是 | 否 | -| 98 | maxTmrCtrl | 是 | 否 | -| 99 | monitorReplica | 是 | 否 | -| 100 | smlTagNullName | 是 | 否 | -| 101 | keepColumnName | 是 | 否 | -| 102 | ratioOfQueryCores | 是 | 否 | -| 103 | maxStreamCompDelay | 是 | 否 | -| 104 | maxFirstStreamCompDelay | 是 | 否 | -| 105 | retryStreamCompDelay | 是 | 否 | -| 106 | streamCompDelayRatio | 是 | 否 | -| 107 | maxVgroupsPerDb | 是 | 否 | -| 108 | maxTablesPerVnode | 是 | 否 | -| 109 | minTablesPerVnode | 是 | 否 | -| 110 | tableIncStepPerVnode | 是 | 否 | -| 111 | cache | 是 | 否 | -| 112 | blocks | 是 | 否 | -| 113 | days | 是 | 否 | -| 114 | keep | 是 | 否 | -| 115 | minRows | 是 | 否 | -| 116 | maxRows | 是 | 否 | -| 117 | quorum | 是 | 否 | -| 118 | comp | 是 | 否 | -| 119 | walLevel | 是 | 否 | -| 120 | fsync | 是 | 否 | -| 121 | replica | 是 | 否 | -| 122 | partitions | 是 | 否 | -| 123 | quorum | 是 | 否 | -| 124 | update | 是 | 否 | -| 125 | cachelast | 是 | 否 | -| 126 | maxSQLLength | 是 | 否 | -| 127 | maxWildCardsLength | 是 | 否 | -| 128 | maxRegexStringLen | 是 | 否 | -| 129 | maxNumOfOrderedRes | 是 | 否 | -| 130 | maxConnections | 是 | 否 | -| 131 | mnodeEqualVnodeNum | 是 | 否 | -| 132 | http | 是 | 否 | -| 133 | httpEnableRecordSql | 是 | 否 | -| 134 | httpMaxThreads | 是 | 否 | -| 135 | restfulRowLimit | 是 | 否 | -| 136 | httpDbNameMandatory | 是 | 否 | -| 137 | httpKeepAlive | 是 | 否 | -| 138 | enableRecordSql | 是 | 否 | -| 139 | maxBinaryDisplayWidth | 是 | 否 | -| 140 | stream | 是 | 否 | -| 141 | retrieveBlockingModel | 是 | 否 | -| 142 | tsdbMetaCompactRatio | 是 | 否 | -| 143 | defaultJSONStrType | 是 | 否 | -| 144 | walFlushSize | 是 | 否 | -| 145 | keepTimeOffset | 是 | 否 | -| 146 | flowctrl | 是 | 否 | -| 147 | slaveQuery | 是 | 否 | -| 148 | adjustMaster | 是 | 否 | -| 149 | topicBinaryLen | 是 | 否 | -| 150 | telegrafUseFieldNum | 是 | 否 | -| 151 | deadLockKillQuery | 是 | 否 | -| 152 | clientMerge | 是 | 否 | -| 153 | sdbDebugFlag | 是 | 否 | -| 154 | odbcDebugFlag | 是 | 否 | -| 155 | httpDebugFlag | 是 | 否 | -| 156 | monDebugFlag | 是 | 否 | -| 157 | cqDebugFlag | 是 | 否 | -| 158 | shortcutFlag | 是 | 否 | -| 159 | probeSeconds | 是 | 否 | -| 160 | probeKillSeconds | 是 | 否 | -| 161 | probeInterval | 是 | 否 | -| 162 | lossyColumns | 是 | 否 | -| 163 | fPrecision | 是 | 否 | -| 164 | dPrecision | 是 | 否 | -| 165 | maxRange | 是 | 否 | -| 166 | range | 是 | 否 | +| 48 | numOfVnodeRsmaThreads | 否 | 是 | +| 49 | numOfQnodeQueryThreads | 否 | 是 | +| 50 | numOfQnodeFetchThreads | 否 | 是 | +| 51 | numOfSnodeSharedThreads | 否 | 是 | +| 52 | numOfSnodeUniqueThreads | 否 | 是 | +| 53 | rpcQueueMemoryAllowed | 否 | 是 | +| 54 | logDir | 是 | 是 | +| 55 | minimalLogDirGB | 是 | 是 | +| 56 | numOfLogLines | 是 | 是 | +| 57 | asyncLog | 是 | 是 | +| 58 | logKeepDays | 是 | 是 | +| 59 | debugFlag | 是 | 是 | +| 60 | tmrDebugFlag | 是 | 是 | +| 61 | uDebugFlag | 是 | 是 | +| 62 | rpcDebugFlag | 是 | 是 | +| 63 | jniDebugFlag | 是 | 是 | +| 64 | qDebugFlag | 是 | 是 | +| 65 | cDebugFlag | 是 | 是 | +| 66 | dDebugFlag | 是 | 是 | +| 67 | vDebugFlag | 是 | 是 | +| 68 | mDebugFlag | 是 | 是 | +| 69 | wDebugFlag | 是 | 是 | +| 70 | sDebugFlag | 是 | 是 | +| 71 | tsdbDebugFlag | 是 | 是 | +| 72 | tqDebugFlag | 否 | 是 | +| 73 | fsDebugFlag | 是 | 是 | +| 74 | udfDebugFlag | 否 | 是 | +| 75 | smaDebugFlag | 否 | 是 | +| 76 | idxDebugFlag | 否 | 是 | +| 77 | tdbDebugFlag | 否 | 是 | +| 78 | metaDebugFlag | 否 | 是 | +| 79 | timezone | 是 | 是 | +| 80 | locale | 是 | 是 | +| 81 | charset | 是 | 是 | +| 82 | udf | 是 | 是 | +| 83 | enableCoreFile | 是 | 是 | +| 84 | arbitrator | 是 | 否 | +| 85 | numOfThreadsPerCore | 是 | 否 | +| 86 | numOfMnodes | 是 | 否 | +| 87 | vnodeBak | 
是 | 否 | +| 88 | balance | 是 | 否 | +| 89 | balanceInterval | 是 | 否 | +| 90 | offlineThreshold | 是 | 否 | +| 91 | role | 是 | 否 | +| 92 | dnodeNopLoop | 是 | 否 | +| 93 | keepTimeOffset | 是 | 否 | +| 94 | rpcTimer | 是 | 否 | +| 95 | rpcMaxTime | 是 | 否 | +| 96 | rpcForceTcp | 是 | 否 | +| 97 | tcpConnTimeout | 是 | 否 | +| 98 | syncCheckInterval | 是 | 否 | +| 99 | maxTmrCtrl | 是 | 否 | +| 100 | monitorReplica | 是 | 否 | +| 101 | smlTagNullName | 是 | 否 | +| 102 | keepColumnName | 是 | 否 | +| 103 | ratioOfQueryCores | 是 | 否 | +| 104 | maxStreamCompDelay | 是 | 否 | +| 105 | maxFirstStreamCompDelay | 是 | 否 | +| 106 | retryStreamCompDelay | 是 | 否 | +| 107 | streamCompDelayRatio | 是 | 否 | +| 108 | maxVgroupsPerDb | 是 | 否 | +| 109 | maxTablesPerVnode | 是 | 否 | +| 110 | minTablesPerVnode | 是 | 否 | +| 111 | tableIncStepPerVnode | 是 | 否 | +| 112 | cache | 是 | 否 | +| 113 | blocks | 是 | 否 | +| 114 | days | 是 | 否 | +| 115 | keep | 是 | 否 | +| 116 | minRows | 是 | 否 | +| 117 | maxRows | 是 | 否 | +| 118 | quorum | 是 | 否 | +| 119 | comp | 是 | 否 | +| 120 | walLevel | 是 | 否 | +| 121 | fsync | 是 | 否 | +| 122 | replica | 是 | 否 | +| 123 | partitions | 是 | 否 | +| 124 | quorum | 是 | 否 | +| 125 | update | 是 | 否 | +| 126 | cachelast | 是 | 否 | +| 127 | maxSQLLength | 是 | 否 | +| 128 | maxWildCardsLength | 是 | 否 | +| 129 | maxRegexStringLen | 是 | 否 | +| 130 | maxNumOfOrderedRes | 是 | 否 | +| 131 | maxConnections | 是 | 否 | +| 132 | mnodeEqualVnodeNum | 是 | 否 | +| 133 | http | 是 | 否 | +| 134 | httpEnableRecordSql | 是 | 否 | +| 135 | httpMaxThreads | 是 | 否 | +| 136 | restfulRowLimit | 是 | 否 | +| 137 | httpDbNameMandatory | 是 | 否 | +| 138 | httpKeepAlive | 是 | 否 | +| 139 | enableRecordSql | 是 | 否 | +| 140 | maxBinaryDisplayWidth | 是 | 否 | +| 141 | stream | 是 | 否 | +| 142 | retrieveBlockingModel | 是 | 否 | +| 143 | tsdbMetaCompactRatio | 是 | 否 | +| 144 | defaultJSONStrType | 是 | 否 | +| 145 | walFlushSize | 是 | 否 | +| 146 | keepTimeOffset | 是 | 否 | +| 147 | flowctrl | 是 | 否 | +| 148 | slaveQuery | 是 | 否 | +| 149 | adjustMaster | 是 | 否 | +| 150 | topicBinaryLen | 是 | 否 | +| 151 | telegrafUseFieldNum | 是 | 否 | +| 152 | deadLockKillQuery | 是 | 否 | +| 153 | clientMerge | 是 | 否 | +| 154 | sdbDebugFlag | 是 | 否 | +| 155 | odbcDebugFlag | 是 | 否 | +| 156 | httpDebugFlag | 是 | 否 | +| 157 | monDebugFlag | 是 | 否 | +| 158 | cqDebugFlag | 是 | 否 | +| 159 | shortcutFlag | 是 | 否 | +| 160 | probeSeconds | 是 | 否 | +| 161 | probeKillSeconds | 是 | 否 | +| 162 | probeInterval | 是 | 否 | +| 163 | lossyColumns | 是 | 否 | +| 164 | fPrecision | 是 | 否 | +| 165 | dPrecision | 是 | 否 | +| 166 | maxRange | 是 | 否 | +| 167 | range | 是 | 否 | diff --git a/include/common/tglobal.h b/include/common/tglobal.h index cd74ffd477..530df70a5e 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -66,6 +66,7 @@ extern int32_t tsNumOfVnodeStreamThreads; extern int32_t tsNumOfVnodeFetchThreads; extern int32_t tsNumOfVnodeWriteThreads; extern int32_t tsNumOfVnodeSyncThreads; +extern int32_t tsNumOfVnodeRsmaThreads; extern int32_t tsNumOfQnodeQueryThreads; extern int32_t tsNumOfQnodeFetchThreads; extern int32_t tsNumOfSnodeSharedThreads; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index adc5af1a17..78afbda686 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -61,6 +61,7 @@ int32_t tsNumOfVnodeStreamThreads = 2; int32_t tsNumOfVnodeFetchThreads = 4; int32_t tsNumOfVnodeWriteThreads = 2; int32_t tsNumOfVnodeSyncThreads = 2; +int32_t tsNumOfVnodeRsmaThreads = 2; int32_t tsNumOfQnodeQueryThreads = 4; int32_t 
tsNumOfQnodeFetchThreads = 4; int32_t tsNumOfSnodeSharedThreads = 2; @@ -377,6 +378,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16); if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1; + tsNumOfVnodeRsmaThreads = tsNumOfCores; + tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4); + if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1; + tsNumOfQnodeQueryThreads = tsNumOfCores * 2; tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1; @@ -538,6 +543,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32; tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; + tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32; tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32; @@ -782,6 +788,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; } else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) { tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; + } else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) { + tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; } else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) { tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; } else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) { diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index ca77042bb2..abfffc045f 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -33,7 +33,6 @@ extern "C" { // clang-format on #define RSMA_TASK_INFO_HASH_SLOT (8) -#define RSMA_EXECUTOR_MAX (1) typedef struct SSmaEnv SSmaEnv; typedef struct SSmaStat SSmaStat; @@ -49,9 +48,12 @@ typedef struct SQTaskFWriter SQTaskFWriter; struct SSmaEnv { SRWLatch lock; int8_t type; + int8_t flag; // 0x01 inClose SSmaStat *pStat; }; +#define SMA_ENV_FLG_CLOSE ((int8_t)0x1) + typedef struct { int8_t inited; int32_t rsetId; @@ -93,7 +95,6 @@ struct SRSmaStat { int64_t refId; // shared by fetch tasks volatile int64_t nBufItems; // number of items in queue buffer SRWLatch lock; // r/w lock for rsma fs(e.g. 
qtaskinfo) - volatile int8_t nExecutor; // [1, max(half of query threads, 4)] int8_t triggerStat; // shared by fetch tasks int8_t commitStat; // 0 not in committing, 1 in committing SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w) @@ -107,6 +108,7 @@ struct SSmaStat { SRSmaStat rsmaStat; // rollup sma }; T_REF_DECLARE() + char data[]; }; #define SMA_STAT_TSMA(s) (&(s)->tsmaStat) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 39c5f3873e..0b51b61c3a 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -189,7 +189,6 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem int32_t smaInit(); void smaCleanUp(); int32_t smaOpen(SVnode* pVnode); -int32_t smaPreClose(SSma* pSma); int32_t smaClose(SSma* pSma); int32_t smaBegin(SSma* pSma); int32_t smaSyncPreCommit(SSma* pSma); @@ -199,7 +198,6 @@ int32_t smaAsyncPreCommit(SSma* pSma); int32_t smaAsyncCommit(SSma* pSma); int32_t smaAsyncPostCommit(SSma* pSma); int32_t smaDoRetention(SSma* pSma, int64_t now); -int32_t smaProcessExec(SSma* pSma, void* pMsg); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); @@ -323,7 +321,6 @@ struct SVnode { TdThreadMutex lock; bool blocked; bool restored; - bool inClose; tsem_t syncSem; SQHandle* pQuery; }; diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index e3b83f9955..32a419022a 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -23,11 +23,13 @@ extern SSmaMgmt smaMgmt; // declaration of static functions -static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma); -static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path); -static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv); -static void *tdFreeTSmaStat(STSmaStat *pStat); -static void tdDestroyRSmaStat(void *pRSmaStat); +static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv); +static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv); +static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma); +static int32_t tdRsmaStartExecutor(const SSma *pSma); +static int32_t tdRsmaStopExecutor(const SSma *pSma); +static void *tdFreeTSmaStat(STSmaStat *pStat); +static void tdDestroyRSmaStat(void *pRSmaStat); /** * @brief rsma init @@ -97,35 +99,42 @@ void smaCleanUp() { } } -static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path) { +static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) { SSmaEnv *pEnv = NULL; pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv)); + *ppEnv = pEnv; if (!pEnv) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; + return TSDB_CODE_FAILED; } SMA_ENV_TYPE(pEnv) = smaType; taosInitRWLatch(&(pEnv->lock)); + (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), *ppEnv) + : atomic_store_ptr(&SMA_RSMA_ENV(pSma), *ppEnv); + if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) { tdFreeSmaEnv(pEnv); - return NULL; + *ppEnv = NULL; + (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? 
atomic_store_ptr(&SMA_TSMA_ENV(pSma), NULL) + : atomic_store_ptr(&SMA_RSMA_ENV(pSma), NULL); + return TSDB_CODE_FAILED; } - return pEnv; + return TSDB_CODE_SUCCESS; } -static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv) { - if (!pEnv) { +static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) { + if (!ppEnv) { terrno = TSDB_CODE_INVALID_PTR; return TSDB_CODE_FAILED; } - if (!(*pEnv)) { - if (!(*pEnv = tdNewSmaEnv(pSma, smaType, path))) { + if (!(*ppEnv)) { + if (tdNewSmaEnv(pSma, smaType, ppEnv) != TSDB_CODE_SUCCESS) { return TSDB_CODE_FAILED; } } @@ -199,7 +208,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS * tdInitSmaStat invoked in other multithread environment later. */ if (!(*pSmaStat)) { - *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat)); + *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat) + sizeof(TdThread) * tsNumOfVnodeRsmaThreads); if (!(*pSmaStat)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_FAILED; @@ -231,6 +240,10 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS if (!RSMA_INFO_HASH(pRSmaStat)) { return TSDB_CODE_FAILED; } + + if (tdRsmaStartExecutor(pSma) < 0) { + return TSDB_CODE_FAILED; + } } else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) { // TODO } else { @@ -291,6 +304,9 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { } } + // step 4: + tdRsmaStopExecutor(pSma); + // step 5: free pStat taosMemoryFreeClear(pStat); } @@ -381,17 +397,70 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) { pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&SMA_TSMA_ENV(pSma)) : atomic_load_ptr(&SMA_RSMA_ENV(pSma)); if (!pEnv) { - char rname[TSDB_FILENAME_LEN] = {0}; - - if (tdInitSmaEnv(pSma, smaType, rname, &pEnv) < 0) { + if (tdInitSmaEnv(pSma, smaType, &pEnv) < 0) { tdUnLockSma(pSma); return TSDB_CODE_FAILED; } - - (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? 
atomic_store_ptr(&SMA_TSMA_ENV(pSma), pEnv) - : atomic_store_ptr(&SMA_RSMA_ENV(pSma), pEnv); } tdUnLockSma(pSma); return TSDB_CODE_SUCCESS; }; + +void *tdRSmaExecutorFunc(void *param) { + setThreadName("vnode-rsma"); + + tdRSmaProcessExecImpl((SSma *)param, RSMA_EXEC_OVERFLOW); + return NULL; +} + +static int32_t tdRsmaStartExecutor(const SSma *pSma) { + TdThreadAttr thAttr = {0}; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); + TdThread *pthread = (TdThread *)&pStat->data; + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + if (taosThreadCreate(&pthread[i], &thAttr, tdRSmaExecutorFunc, (void *)pSma) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + smaError("vgId:%d, failed to create pthread for rsma since %s", SMA_VID(pSma), terrstr()); + return -1; + } + smaDebug("vgId:%d, success to create pthread for rsma", SMA_VID(pSma)); + } + + taosThreadAttrDestroy(&thAttr); + return 0; +} + +static int32_t tdRsmaStopExecutor(const SSma *pSma) { + if (pSma && VND_IS_RSMA(pSma->pVnode)) { + SSmaEnv *pEnv = NULL; + SSmaStat *pStat = NULL; + SRSmaStat *pRSmaStat = NULL; + TdThread *pthread = NULL; + + if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = SMA_ENV_STAT(pEnv))) { + return 0; + } + + pEnv->flag |= SMA_ENV_FLG_CLOSE; + pRSmaStat = (SRSmaStat *)pStat; + pthread = (TdThread *)&pStat->data; + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + tsem_post(&(pRSmaStat->notEmpty)); + } + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + if (taosCheckPthreadValid(pthread[i])) { + smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64, SMA_VID(pSma), pthread[i]); + taosThreadJoin(pthread[i], NULL); + } + } + } + return 0; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index e2710b26e3..235fb1f941 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -146,20 +146,6 @@ int32_t smaClose(SSma *pSma) { return 0; } -int32_t smaPreClose(SSma *pSma) { - if (pSma && VND_IS_RSMA(pSma->pVnode)) { - SSmaEnv *pEnv = NULL; - SRSmaStat *pStat = NULL; - if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv))) { - return 0; - } - for (int32_t i = 0; i < RSMA_EXECUTOR_MAX; ++i) { - tsem_post(&(pStat->notEmpty)); - } - } - return 0; -} - /** * @brief rsma env restore * diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 448b8ab508..fabfcd93f1 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -621,7 +621,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { */ int32_t smaDoRetention(SSma *pSma, int64_t now) { int32_t code = TSDB_CODE_SUCCESS; - if (VND_IS_RSMA(pSma->pVnode)) { + if (!VND_IS_RSMA(pSma->pVnode)) { return code; } @@ -911,39 +911,6 @@ static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputTyp return TSDB_CODE_SUCCESS; } -static int32_t tdRSmaExecCheck(SSma *pSma) { - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); - - if (atomic_load_8(&pRSmaStat->nExecutor) >= TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) { - return TSDB_CODE_SUCCESS; - } - - SRSmaExecMsg fetchMsg; - int32_t contLen = sizeof(SMsgHead); - void *pBuf = rpcMallocCont(0 + contLen); - - ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma); - ((SMsgHead *)pBuf)->contLen = sizeof(SMsgHead); - - SRpcMsg rpcMsg = { - .code 
= 0, - .msgType = TDMT_VND_EXEC_RSMA, - .pCont = pBuf, - .contLen = contLen, - }; - - if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) { - smaError("vgId:%d, failed to put rsma exec msg into query-queue since %s", SMA_VID(pSma), terrstr()); - goto _err; - } - - smaDebug("vgId:%d, success to put rsma fetch msg into query-queue", SMA_VID(pSma)); - - return TSDB_CODE_SUCCESS; -_err: - return TSDB_CODE_FAILED; -} - int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); if (!pEnv) { @@ -974,10 +941,6 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { goto _err; } } - - if (tdRSmaExecCheck(pSma) < 0) { - goto _err; - } } } tdUidStoreDestory(&uidStore); @@ -1591,9 +1554,11 @@ _end: } static void tdFreeRSmaSubmitItems(SArray *pItems) { + ASSERT(taosArrayGetSize(pItems) > 0); for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) { taosFreeQitem(*(void **)taosArrayGet(pItems, i)); } + taosArrayClear(pItems); } /** @@ -1703,6 +1668,7 @@ _err: * @param type * @return int32_t */ + int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { SVnode *pVnode = pSma->pVnode; SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); @@ -1722,9 +1688,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { goto _err; } - bool isBusy = false; while (true) { - isBusy = false; // step 1: rsma exec - consume data in buffer queue for all suids if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) { void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock @@ -1733,12 +1697,13 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { int64_t itemSize = 0; if ((itemSize = taosQueueItemSize(pInfo->queue)) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { - smaDebug("vgId:%d, queueItemSize is %" PRIi64 " execType:%" PRIi8, SMA_VID(pSma), itemSize, type); if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock int32_t qallItemSize = taosQallItemSize(pInfo->qall); if (qallItemSize > 0) { tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); + smaDebug("vgId:%d, qitemSize:%" PRIi64 ", batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), itemSize, + qallItemSize, type); } if (type == RSMA_EXEC_OVERFLOW) { @@ -1748,7 +1713,6 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { if (qallItemSize > 0) { // subtract the item size after the task finished, commit should wait for all items be consumed atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); - isBusy = true; } ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); } @@ -1756,7 +1720,11 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { pIter = taosHashIterate(infoHash, pIter); } if (type == RSMA_EXEC_COMMIT) { - break; + if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { + break; + } else { + continue; + } } } #if 0 @@ -1790,16 +1758,18 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { } if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { - if (pVnode->inClose) { - break; - } - tsem_wait(&pRSmaStat->notEmpty); - if (pVnode->inClose && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) { - smaInfo("vgId:%d, exec task end, inClose:%d, nBufItems:%" PRIi64, SMA_VID(pSma), pVnode->inClose, - atomic_load_64(&pRSmaStat->nBufItems)); + if (pEnv->flag & SMA_ENV_FLG_CLOSE) { break; } } + + tsem_wait(&pRSmaStat->notEmpty); + + if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && 
(atomic_load_64(&pRSmaStat->nBufItems) <= 0)) { + smaInfo("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag, + atomic_load_64(&pRSmaStat->nBufItems)); + break; + } } // end of while(true) _end: @@ -1809,39 +1779,3 @@ _err: taosArrayDestroy(pSubmitArr); return TSDB_CODE_FAILED; } - -/** - * @brief exec rsma level 1data, fetch result of level 2/3 and submit - * - * @param pSma - * @param pMsg - * @return int32_t - */ -int32_t smaProcessExec(SSma *pSma, void *pMsg) { - SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg; - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); - - if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) { - terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP; - goto _err; - } - smaDebug("vgId:%d, begin to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - - int8_t nOld = atomic_fetch_add_8(&pRSmaStat->nExecutor, 1); - - if (nOld < TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) { - if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_OVERFLOW) < 0) { - goto _err; - } - } else { - atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1); - } - - smaDebug("vgId:%d, success to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - return TSDB_CODE_SUCCESS; -_err: - atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1); - smaError("vgId:%d, failed to process rsma exec msg by TID:%p since %s", SMA_VID(pSma), (void *)taosGetSelfPthreadId(), - terrstr()); - return TSDB_CODE_FAILED; -} diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index dcfbd33b90..a4fd984fb7 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -87,7 +87,6 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { pVnode->msgCb = msgCb; taosThreadMutexInit(&pVnode->lock, NULL); pVnode->blocked = false; - pVnode->inClose = false; tsem_init(&pVnode->syncSem, 0, 0); tsem_init(&(pVnode->canCommit), 0, 1); @@ -182,8 +181,6 @@ _err: void vnodePreClose(SVnode *pVnode) { if (pVnode) { syncLeaderTransfer(pVnode->sync); - pVnode->inClose = true; - smaPreClose(pVnode->pSma); } } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 7a8d168f4f..495220b5de 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -301,8 +301,6 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_SCH_QUERY_CONTINUE: return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); - case TDMT_VND_EXEC_RSMA: - return smaProcessExec(pVnode->pSma, pMsg); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -380,14 +378,14 @@ static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t code = 0; SVTrimDbReq trimReq = {0}; - vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp); - // decode if (tDeserializeSVTrimDbReq(pReq, len, &trimReq) != 0) { code = TSDB_CODE_INVALID_MSG; goto _exit; } + vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp); + // process code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp); if (code) goto _exit; From 16dd4a98997dfc69de24737acaaa5f2faee03e0e Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 24 Aug 2022 11:13:11 +0800 Subject: [PATCH 11/79] fix: commit coredump --- source/dnode/vnode/src/tsdb/tsdbCommit.c | 3 +++ 1 
file changed, 3 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 020f3b0bc6..90116bc95c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -835,6 +835,9 @@ static int32_t tsdbMergeCommitLast(SCommitter *pCommitter, STbDataIter *pIter) { // set block data schema if need if (pBlockData->suid == 0 && pBlockData->uid == 0) { + code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid); + if (code) goto _err; + code = tBlockDataInit(pBlockData, pTbData->suid, pTbData->suid ? 0 : pTbData->uid, pCommitter->skmTable.pTSchema); if (code) goto _err; From 7ef20f7df2b6e9875bf033b0c5acaca916148dde Mon Sep 17 00:00:00 2001 From: tangfangzhi Date: Tue, 23 Aug 2022 18:15:28 +0800 Subject: [PATCH 12/79] ci: git remote prune origin before git pull --- Jenkinsfile2 | 1 + 1 file changed, 1 insertion(+) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index bc309ff66c..d7df07f06a 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -53,6 +53,7 @@ def check_docs() { } sh ''' cd ${WKC} + git remote prune origin git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD From dae7f9415295cf4619f5d5d6ffe8fcaf2069ae73 Mon Sep 17 00:00:00 2001 From: tangfangzhi Date: Tue, 23 Aug 2022 18:15:28 +0800 Subject: [PATCH 13/79] ci: git remote prune origin before git pull --- Jenkinsfile2 | 1 + 1 file changed, 1 insertion(+) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index bc309ff66c..d7df07f06a 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -53,6 +53,7 @@ def check_docs() { } sh ''' cd ${WKC} + git remote prune origin git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD From fd2fccd7d70360ab5ce7cd7985a5f3dcafebc892 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 24 Aug 2022 13:12:13 +0800 Subject: [PATCH 14/79] fix: data loss --- source/dnode/vnode/src/tsdb/tsdbCommit.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 90116bc95c..8d6e82d266 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -966,7 +966,20 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData) { pRow = NULL; } - if (pRow == NULL) goto _exit; + if (pRow == NULL) { + if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) == 0) { + SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid}; + code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx); + if (code) goto _err; + + if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + } + + goto _exit; + } int32_t iBlock = 0; SBlock block; From 150956ffd38f88b40257dd701a96d96e1d4c75ae Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 24 Aug 2022 13:19:52 +0800 Subject: [PATCH 15/79] enh: update tbMeta after creating table --- include/common/tmsg.h | 2 +- include/libs/qcom/query.h | 1 + source/common/src/tmsg.c | 2 +- source/dnode/vnode/src/meta/metaTable.c | 2 +- source/libs/catalog/src/catalog.c | 15 ++++++++++++--- source/libs/qcom/src/queryUtil.c | 9 +++++++-- source/libs/qcom/src/querymsg.c | 13 +++++++++++++ source/libs/scheduler/src/schRemote.c | 10 +++++++--- 8 files changed, 43 insertions(+), 11 deletions(-) diff --git a/include/common/tmsg.h 
b/include/common/tmsg.h index ae6f034df5..7c7f017fe6 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1276,7 +1276,7 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp); int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp); int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp); -void tFreeSTableMetaRsp(STableMetaRsp* pRsp); +void tFreeSTableMetaRsp(void* pRsp); void tFreeSTableIndexRsp(void* info); typedef struct { diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 34d870397f..1fa7dca7dc 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -215,6 +215,7 @@ void initQueryModuleMsgHandle(); const SSchema* tGetTbnameColumnSchema(); bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags); +int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta); int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta); char* jobTaskStatusStr(int32_t status); diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 3ceb9ca192..b3c0363e44 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3330,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) { return 0; } -void tFreeSTableMetaRsp(STableMetaRsp *pRsp) { taosMemoryFreeClear(pRsp->pSchemas); } +void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp*)pRsp)->pSchemas); } void tFreeSTableIndexRsp(void *info) { if (NULL == info) { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 6b18e1b48d..811aac28b7 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -437,7 +437,7 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMe (*pMetaRsp)->suid = pReq->ctb.suid; strcpy((*pMetaRsp)->tbName, pReq->name); } else { - metaUpdateMetaRsp(pReq->uid, pReq->name, pReq->ntb.schemaRow, *pMetaRsp); + metaUpdateMetaRsp(pReq->uid, pReq->name, &pReq->ntb.schemaRow, *pMetaRsp); } } } diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index b6e958e192..7b32eadcd4 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -270,13 +270,22 @@ int32_t ctgUpdateTbMeta(SCatalog* pCtg, STableMetaRsp* rspMsg, bool syncOp) { int32_t code = 0; strcpy(output->dbFName, rspMsg->dbFName); - strcpy(output->tbName, rspMsg->tbName); output->dbId = rspMsg->dbId; - SET_META_TYPE_TABLE(output->metaType); + if (TSDB_CHILD_TABLE == rspMsg->tableType && NULL == rspMsg->pSchemas) { + strcpy(output->ctbName, rspMsg->tbName); - CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta)); + SET_META_TYPE_CTABLE(output->metaType); + + CTG_ERR_JRET(queryCreateCTableMetaFromMsg(rspMsg, &output->ctbMeta)); + } else { + strcpy(output->tbName, rspMsg->tbName); + + SET_META_TYPE_TABLE(output->metaType); + + CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta)); + } CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncOp)); diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index 3c8b019d81..d848016e46 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -213,6 +213,11 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam return s; } +void 
freeSTableMetaRspPointer(void *p) { + tFreeSTableMetaRsp(*(void**)p); + taosMemoryFreeClear(*(void**)p); +} + void destroyQueryExecRes(SExecResult* pRes) { if (NULL == pRes || NULL == pRes->res) { return; @@ -220,13 +225,13 @@ void destroyQueryExecRes(SExecResult* pRes) { switch (pRes->msgType) { case TDMT_VND_CREATE_TABLE: { - taosArrayDestroyEx((SArray*)pRes->res, tFreeSTableMetaRsp); + taosArrayDestroyEx((SArray*)pRes->res, freeSTableMetaRspPointer); break; } case TDMT_MND_CREATE_STB: case TDMT_VND_ALTER_TABLE: case TDMT_MND_ALTER_STB: { - tFreeSTableMetaRsp((STableMetaRsp*)pRes->res); + tFreeSTableMetaRsp(pRes->res); taosMemoryFreeClear(pRes->res); break; } diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c index ed8786170d..e2d3ac1583 100644 --- a/source/libs/qcom/src/querymsg.c +++ b/source/libs/qcom/src/querymsg.c @@ -354,6 +354,19 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) { return TSDB_CODE_SUCCESS; } +int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) { + pMeta->vgId = msg->vgId; + pMeta->tableType = msg->tableType; + pMeta->uid = msg->tuid; + pMeta->suid = msg->suid; + + qDebug("ctable %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s suid %" PRIx64 , + msg->tbName, pMeta->uid, pMeta->tableType, pMeta->vgId, msg->dbFName, pMeta->suid); + + return TSDB_CODE_SUCCESS; +} + + int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) { int32_t total = msg->numOfColumns + msg->numOfTags; int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total; diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index bd0c3009b0..fc2a8d1e08 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -102,8 +102,11 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa tDecoderInit(&coder, msg, msgSize); code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp); if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { - pJob->execRes.res = taosArrayInit(batchRsp.nRsps, POINTER_BYTES); - pJob->execRes.msgType = TDMT_VND_CREATE_TABLE; + SCH_LOCK(SCH_WRITE, &pJob->resLock); + if (NULL == pJob->execRes.res) { + pJob->execRes.res = taosArrayInit(batchRsp.nRsps, POINTER_BYTES); + pJob->execRes.msgType = TDMT_VND_CREATE_TABLE; + } for (int32_t i = 0; i < batchRsp.nRsps; ++i) { SVCreateTbRsp *rsp = batchRsp.pRsps + i; @@ -115,6 +118,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa code = rsp->code; } } + SCH_UNLOCK(SCH_WRITE, &pJob->resLock); if (taosArrayGetSize((SArray*)pJob->execRes.res) <= 0) { taosArrayDestroy((SArray*)pJob->execRes.res); @@ -1113,7 +1117,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, break; } -#if 1 +#if 0 SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)}; code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? 
&rpcCtx : NULL)); msg = NULL; From 8f6aaf0a60e257f06c0db7e1def952d264298d15 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 24 Aug 2022 13:47:27 +0800 Subject: [PATCH 16/79] fix: donot retry if error code not match retry code --- source/dnode/mnode/impl/src/mndTrans.c | 4 ++-- tests/script/tsim/db/basic2.sim | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 06a95cfc93..c77a80cc82 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1308,11 +1308,11 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { if (pTrans->policy == TRN_POLICY_ROLLBACK) { if (pTrans->lastAction != 0) { STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->lastAction); - if (pAction->retryCode != 0 && pAction->retryCode != pAction->errCode) { + if (pAction->retryCode != 0 && pAction->retryCode == pAction->errCode) { if (pTrans->failedTimes < 6) { mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id, pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes); - taosMsleep(100); + taosMsleep(1000); continueExec = true; return true; } diff --git a/tests/script/tsim/db/basic2.sim b/tests/script/tsim/db/basic2.sim index b7ac0b5edd..4f0ba4a13c 100644 --- a/tests/script/tsim/db/basic2.sim +++ b/tests/script/tsim/db/basic2.sim @@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== conflict stb -sql create database db vgroups 1; +sql create database db vgroups 4; sql use db; sql create table stb (ts timestamp, i int) tags (j int); sql_error create table stb using stb tags (1); @@ -16,6 +16,9 @@ sql_error create table ctb (ts timestamp, i int) tags (j int); sql create table ntb (ts timestamp, i int); sql_error create table ntb (ts timestamp, i int) tags (j int); +sql drop table ntb +sql create table ntb (ts timestamp, i int) tags (j int); + sql drop database db print =============== create database d1 From 1b964bfdc94e9f7e2df657b1e119fc79a5cec2e4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 24 Aug 2022 13:47:56 +0800 Subject: [PATCH 17/79] fix: print src ip if found invalid packet --- source/libs/transport/src/transSvr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 447db76136..6dd9481b95 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -276,7 +276,7 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { while (transReadComplete(pBuf)) { tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); if (true == pBuf->invalid || false == uvHandleReq(conn)) { - tError("%s conn %p read invalid packet", transLabel(pTransInst), conn); + tError("%s conn %p read invalid packet, dst: %s, srv: %s", transLabel(pTransInst), conn, conn->dst, conn->src); destroyConn(conn, true); return; } From 4436af66f4f5114922d290f523cdd160a6256a55 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 24 Aug 2022 14:03:46 +0800 Subject: [PATCH 18/79] refactor: adjust telemetry interval --- source/common/src/tglobal.c | 2 +- source/dnode/mnode/impl/src/mndTelem.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index adc5af1a17..3b3a18a4d4 100644 --- 
a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -76,7 +76,7 @@ bool tsMonitorComp = false; // telem bool tsEnableTelem = true; -int32_t tsTelemInterval = 86400; +int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com"; uint16_t tsTelemPort = 80; diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index 27814fe5be..93f7531a27 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ b/source/dnode/mnode/impl/src/mndTelem.c @@ -131,7 +131,9 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) { char* pCont = mndBuildTelemetryReport(pMnode); if (pCont != NULL) { if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) { - mError("failed to send telemetry msg"); + mError("failed to send telemetry report"); + } else { + mTrace("succeed to send telemetry report"); } taosMemoryFree(pCont); } From c0b80a0da46acd19a366c7d1eb28f1c549928987 Mon Sep 17 00:00:00 2001 From: dingbo Date: Wed, 24 Aug 2022 15:11:38 +0800 Subject: [PATCH 19/79] docs: hide 3.0 download --- docs/zh/28-releases/01-tdengine.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index a64798caa0..1e97572ca4 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -9,7 +9,7 @@ import Release from "/components/ReleaseV3"; -## 3.0.0.0 + From 24261cc90b14ed6bf5fbb32682c29ccd19d3b3c6 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 24 Aug 2022 15:25:25 +0800 Subject: [PATCH 20/79] refactor(stream): refine stream backend interface --- examples/c/stream_demo.c | 2 +- include/common/tcommon.h | 24 +++ include/libs/stream/tstream.h | 15 +- source/dnode/vnode/src/tq/tq.c | 16 +- source/libs/executor/src/scanoperator.c | 36 ++++ source/libs/executor/src/timewindowoperator.c | 169 ++++++++++-------- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamState.c | 54 ++++-- 8 files changed, 215 insertions(+), 103 deletions(-) diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c index 55556f21a1..1c9d11b755 100644 --- a/examples/c/stream_demo.c +++ b/examples/c/stream_demo.c @@ -96,7 +96,7 @@ int32_t create_stream() { taos_free_result(pRes); pRes = taos_query(pConn, - "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, k from st1 partition by tbname state_window(k)"); + "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, avg(k) from st1 partition by tbname interval(10s)"); if (taos_errno(pRes) != 0) { printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/include/common/tcommon.h b/include/common/tcommon.h index dbe020f7ec..fb59d8b9a0 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -44,6 +44,30 @@ enum { ) // clang-format on +typedef struct { + TSKEY ts; + uint64_t groupId; +} SWinKey; + +static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { + SWinKey* pWin1 = (SWinKey*)pKey1; + SWinKey* pWin2 = (SWinKey*)pKey2; + + if (pWin1->groupId > pWin2->groupId) { + return 1; + } else if (pWin1->groupId < pWin2->groupId) { + return -1; + } + + if (pWin1->ts > pWin2->ts) { + return 1; + } else if (pWin1->ts < pWin2->ts) { + return -1; + } + + return 0; +} + enum { TMQ_MSG_TYPE__DUMMY = 0, TMQ_MSG_TYPE__POLL_RSP, diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h 
index 16b259cf59..2c27509008 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -551,16 +551,17 @@ typedef struct { } SStreamStateCur; #if 1 -int32_t streamStatePut(SStreamState* pState, const void* key, int32_t kLen, const void* value, int32_t vLen); -int32_t streamStateGet(SStreamState* pState, const void* key, int32_t kLen, void** pVal, int32_t* pVLen); -int32_t streamStateDel(SStreamState* pState, const void* key, int32_t kLen); +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateDel(SStreamState* pState, const SWinKey* key); +void streamFreeVal(void* val); -SStreamStateCur* streamStateGetCur(SStreamState* pState, const void* key, int32_t kLen); -SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const void* key, int32_t kLen); -SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const void* key, int32_t kLen); +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); void streamStateFreeCur(SStreamStateCur* pCur); -int32_t streamGetKVByCur(SStreamStateCur* pCur, void** pKey, int32_t* pKLen, void** pVal, int32_t* pVLen); +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 1456c6c067..3ff59ac2c0 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -652,27 +652,33 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { // expand executor if (pTask->taskLevel == TASK_LEVEL__SOURCE) { + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } + SReadHandle handle = { .meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTqReader = 1, + .pStateBackend = pTask->pState, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); ASSERT(pTask->exec.executor); } else if (pTask->taskLevel == TASK_LEVEL__AGG) { + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo), + .pStateBackend = pTask->pState, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle); ASSERT(pTask->exec.executor); } - pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); - if (pTask->pState == NULL) { - return -1; - } - // sink /*pTask->ahandle = pTq->pVnode;*/ if (pTask->outputType == TASK_OUTPUT__SMA) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 599f86f4fa..3eb382522a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1281,6 +1281,42 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; +#if 0 + SStreamState* pState = pTaskInfo->streamInfo.pState; + if (pState) { + printf(">>>>>>>> stream write backend\n"); + SWinKey key = { + .ts = 1, + 
.groupId = 2, + }; + char tmp[100] = "abcdefg1"; + if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) { + ASSERT(0); + } + + key.ts = 2; + char tmp2[100] = "abcdefg2"; + if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) { + ASSERT(0); + } + + key.groupId = 5; + key.ts = 1; + char tmp3[100] = "abcdefg3"; + if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) { + ASSERT(0); + } + + char* val2 = NULL; + int32_t sz; + if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) { + ASSERT(0); + } + printf("stream read %s %d\n", val2, sz); + streamFreeVal(val2); + } +#endif + qDebug("stream scan called"); if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 3c4cadbc31..e89a43f154 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -15,6 +15,7 @@ #include "executorimpl.h" #include "function.h" #include "functionMgt.h" +#include "tcommon.h" #include "tcompare.h" #include "tdatablock.h" #include "tfill.h" @@ -27,11 +28,6 @@ typedef enum SResultTsInterpType { #define IS_FINAL_OP(op) ((op)->isFinal) -typedef struct SWinRes { - TSKEY ts; - uint64_t groupId; -} SWinRes; - typedef struct SPullWindowInfo { STimeWindow window; uint64_t groupId; @@ -641,7 +637,8 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, numOfExprs); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, + numOfExprs); if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { closeResultRow(pr); @@ -812,7 +809,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) { int32_t compareResKey(void* pKey, void* data, int32_t index) { SArray* res = (SArray*)data; SResKeyPos* pos = taosArrayGetP(res, index); - SWinRes* pData = (SWinRes*)pKey; + SWinKey* pData = (SWinKey*)pKey; if (pData->ts == *(int64_t*)pos->key) { if (pData->groupId > pos->groupId) { return 1; @@ -828,7 +825,7 @@ int32_t compareResKey(void* pKey, void* data, int32_t index) { static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SArray* pUpdated) { int32_t size = taosArrayGetSize(pUpdated); - SWinRes data = {.ts = ts, .groupId = groupId}; + SWinKey data = {.ts = ts, .groupId = groupId}; int32_t index = binarySearchCom(pUpdated, size, &data, TSDB_ORDER_DESC, compareResKey); if (index == -1) { index = 0; @@ -861,8 +858,8 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_ newPos->groupId = groupId; newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; *(int64_t*)newPos->key = ts; - SWinRes key = {.ts = ts, .groupId = groupId}; - if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { + SWinKey key = {.ts = ts, .groupId = groupId}; + if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { taosMemoryFree(newPos); } return TSDB_CODE_SUCCESS; @@ -879,20 +876,20 @@ static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpda static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) { int32_t size = 
taosArrayGetSize(pWins); for (int32_t i = 0; i < size; i++) { - SWinRes* pW = taosArrayGet(pWins, i); - taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes)); + SWinKey* pW = taosArrayGet(pWins, i); + taosHashRemove(pUpdatedMap, pW, sizeof(SWinKey)); } } int64_t getWinReskey(void* data, int32_t index) { SArray* res = (SArray*)data; - SWinRes* pos = taosArrayGet(res, index); + SWinKey* pos = taosArrayGet(res, index); return pos->ts; } int32_t compareWinRes(void* pKey, void* data, int32_t index) { SArray* res = (SArray*)data; - SWinRes* pos = taosArrayGetP(res, index); + SWinKey* pos = taosArrayGetP(res, index); SResKeyPos* pData = (SResKeyPos*)pKey; if (*(int64_t*)pData->key == pos->ts) { if (pData->groupId > pos->groupId) { @@ -985,8 +982,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) && inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) { updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, + numOfOutput); } doCloseWindow(pResultRowInfo, pInfo, pResult); @@ -1025,8 +1022,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, + numOfOutput); doCloseWindow(pResultRowInfo, pInfo, pResult); } @@ -1214,8 +1211,8 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, + pBlock->info.rows, numOfOutput); } static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { @@ -1418,7 +1415,7 @@ void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, TSDB_ORDER_ASC); doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]); if (pUpWins) { - SWinRes winRes = {.ts = win.skey, .groupId = groupIds[i]}; + SWinKey winRes = {.ts = win.skey, .groupId = groupIds[i]}; taosArrayPush(pUpWins, &winRes); } } @@ -1445,7 +1442,7 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* uint64_t winGpId = pGpDatas ? 
pGpDatas[startPos] : pBlock->info.groupId; bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput); if (pUpWins && res) { - SWinRes winRes = {.ts = win.skey, .groupId = winGpId}; + SWinKey winRes = {.ts = win.skey, .groupId = winGpId}; taosArrayPush(pUpWins, &winRes); } getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win); @@ -1484,11 +1481,11 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, STimeWindow win; win.skey = ts; win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; - SWinRes winRe = { + SWinKey winRe = { .ts = win.skey, .groupId = groupId, }; - void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes)); + void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinKey)); if (isCloseWindow(&win, pSup)) { if (chIds && pPullDataMap) { SArray* chAy = *(SArray**)chIds; @@ -1555,7 +1552,7 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); for (int32_t i = *index; i < size; i++) { - SWinRes* pWin = taosArrayGet(pWins, i); + SWinKey* pWin = taosArrayGet(pWins, i); colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false); colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false); pBlock->info.rows++; @@ -1595,6 +1592,9 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + + SStreamState* pState = pTaskInfo->streamInfo.pState; + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -1639,6 +1639,35 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap); } +#if 0 + if (pState) { + printf(">>>>>>>> stream read backend\n"); + SWinKey key = { + .ts = 1, + .groupId = 2, + }; + char* val = NULL; + int32_t sz; + if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) { + ASSERT(0); + } + printf("stream read %s %d\n", val, sz); + streamFreeVal(val); + + SStreamStateCur* pCur = streamStateGetCur(pState, &key); + ASSERT(pCur); + while (streamStateCurNext(pState, pCur) == 0) { + SWinKey key1; + const void* val1; + if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) { + break; + } + printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz); + } + streamStateFreeCur(pCur); + } +#endif + pOperator->status = OP_RES_TO_RETURN; closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); @@ -1857,7 +1886,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* } } pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); - pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); pInfo->delIndex = 0; pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -1958,8 +1987,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator } 
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, + pBlock->info.rows, numOfOutput); } static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { @@ -2811,7 +2840,7 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr return; } for (int32_t i = 0; i < size; i++) { - SWinRes* pWinRes = taosArrayGet(pWinArray, i); + SWinKey* pWinRes = taosArrayGet(pWinArray, i); SResultRow* pCurResult = NULL; STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts + 1}; setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx, @@ -2854,12 +2883,12 @@ int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC); } -void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) { +void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) { SArray* childIds = taosArrayInit(8, sizeof(int32_t)); for (int32_t i = 0; i < size; i++) { taosArrayPush(childIds, &i); } - taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*)); + taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*)); } static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; } @@ -2906,11 +2935,11 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc } if (IS_FINAL_OP(pInfo) && isClosed && pInfo->pChildren) { bool ignore = true; - SWinRes winRes = { + SWinKey winRes = { .ts = nextWin.skey, .groupId = tableGroupId, }; - void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes)); + void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey)); if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) { SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId}; // add pull data request @@ -3039,8 +3068,8 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { uint64_t* groupIdData = (uint64_t*)pGroupCol->pData; int32_t chId = getChildIndex(pBlock); for (int32_t i = 0; i < pBlock->info.rows; i++) { - SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; - void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes)); + SWinKey winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; + void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey)); if (chIds) { SArray* chArray = *(SArray**)chIds; int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ); @@ -3049,7 +3078,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { taosArrayRemove(chArray, index); if (taosArrayGetSize(chArray) == 0) { // pull data is over - taosHashRemove(pMap, &winRes, sizeof(SWinRes)); + taosHashRemove(pMap, &winRes, sizeof(SWinKey)); } } } @@ -3133,7 +3162,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) { pInfo->binfo.pRes->info.type = pBlock->info.type; } else if (pBlock->info.type == STREAM_CLEAR) { - SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, 
pOperator->exprSupp.numOfExprs, pBlock, pUpWins); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); @@ -3171,7 +3200,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap); continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { - SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); removeResults(pUpWins, pUpdatedMap); taosArrayDestroy(pUpWins); @@ -3386,7 +3415,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired; pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); pInfo->delIndex = 0; - pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); pOperator->operatorType = pPhyNode->type; @@ -3721,8 +3750,8 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS } if (pWinInfo->win.skey > pStartTs[i]) { if (pStDeleted && pWinInfo->isOutput) { - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->isOutput = false; } pWinInfo->win.skey = pStartTs[i]; @@ -3840,8 +3869,8 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo); taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition)); if (pWinInfo->isOutput) { - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->isOutput = false; } taosArrayRemove(pInfo->streamAggSup.pCurWins, i); @@ -3903,8 +3932,8 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData } pCurWin->isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) { - SWinRes value = {.ts = pCurWin->win.skey, .groupId = groupId}; - code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); + SWinKey value = {.ts = pCurWin->win.skey, .groupId = groupId}; + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey)); if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -3956,8 +3985,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup, int32_t step = 0; for (int32_t i = 0; i < pBlock->info.rows; i += step) { int32_t winIndex = 0; - SResultWindowInfo* pCurWin = - getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex); + SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex); if (!pCurWin || pCurWin->pos.pageId == -1) { // window has been closed. 
step = 1; @@ -3982,9 +4010,9 @@ static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated) { if (pos == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - pos->groupId = ((SWinRes*)pData)->groupId; + pos->groupId = ((SWinKey*)pData)->groupId; pos->pos = *(SResultRowPosition*)key; - *(int64_t*)pos->key = ((SWinRes*)pData)->ts; + *(int64_t*)pos->key = ((SWinKey*)pData)->ts; taosArrayPush(pUpdated, &pos); } taosArraySort(pUpdated, resultrowComparAsc); @@ -4000,7 +4028,7 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It blockDataEnsureCapacity(pBlock, size); size_t keyLen = 0; while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) { - SWinRes* res = *Ite; + SWinKey* res = *Ite; SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false); SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); @@ -4131,8 +4159,8 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) { int32_t size = taosArrayGetSize(pResWins); for (int32_t i = 0; i < size; i++) { SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i); - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); } } @@ -4170,14 +4198,14 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); - doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX, pOperator->exprSupp.numOfExprs, 0, - pWins); + doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX, + pOperator->exprSupp.numOfExprs, 0, pWins); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info; - doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX, pChildOp->exprSupp.numOfExprs, - 0, NULL); + doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX, + pChildOp->exprSupp.numOfExprs, 0, NULL); rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator); } taosArrayDestroy(pWins); @@ -4581,7 +4609,8 @@ SStateWindowInfo* getStateWindow(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_ } int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, uint64_t groupId, - SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, SHashObj* pSeDeleted) { + SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, + SHashObj* pSeDeleted) { *allEqual = true; SStateWindowInfo* pWinInfo = taosArrayGet(pWinInfos, winIndex); for (int32_t i = start; i < rows; ++i) { @@ -4602,9 +4631,8 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u } if (pWinInfo->winInfo.win.skey > pTs[i]) { if (pSeDeleted && pWinInfo->winInfo.isOutput) { - SWinRes res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId}; - taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res, - sizeof(SWinRes)); + SWinKey res = 
{.ts = pWinInfo->winInfo.win.skey, .groupId = groupId}; + taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->winInfo.isOutput = false; } pWinInfo->winInfo.win.skey = pTs[i]; @@ -4617,14 +4645,14 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u return rows - start; } -static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, - SHashObj* pSeUpdated, SHashObj* pSeDeleted) { +static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SHashObj* pSeUpdated, + SHashObj* pSeDeleted) { SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pGroupColInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); TSKEY* tsCol = (TSKEY*)pTsColInfo->pData; bool allEqual = false; int32_t step = 1; - uint64_t* gpCol = (uint64_t*) pGroupColInfo->pData; + uint64_t* gpCol = (uint64_t*)pGroupColInfo->pData; for (int32_t i = 0; i < pBlock->info.rows; i += step) { int32_t winIndex = 0; SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], gpCol[i], &winIndex); @@ -4668,13 +4696,12 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl char* pKeyData = colDataGetData(pKeyColInfo, i); int32_t winIndex = 0; bool allEqual = true; - SStateWindowInfo* pCurWin = - getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex); - winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, - pSDataBlock->info.rows, i, &allEqual, pStDeleted); + SStateWindowInfo* pCurWin = getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex); + winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, pSDataBlock->info.rows, + i, &allEqual, pStDeleted); if (!allEqual) { - appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, - GROUPID_COLUMN_INDEX, &groupId); + appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, GROUPID_COLUMN_INDEX, + &groupId); taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition)); deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo); continue; @@ -4685,8 +4712,8 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl } pCurWin->winInfo.isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { - SWinRes value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; - code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); + SWinKey value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; + code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey)); if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index c78ff0756f..9d4010f60e 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -358,7 +358,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat FAIL_SHUFFLE_DISPATCH: if (pReqs) { for (int32_t i = 0; i < vgSz; i++) { - taosArrayDestroy(pReqs[i].data); + taosArrayDestroyP(pReqs[i].data, taosMemoryFree); taosArrayDestroy(pReqs[i].dataLen); } taosMemoryFree(pReqs); diff --git a/source/libs/stream/src/streamState.c 
b/source/libs/stream/src/streamState.c index 6ccc90fa51..dfd6f012cc 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -15,6 +15,7 @@ #include "executor.h" #include "streamInc.h" +#include "tcommon.h" #include "ttimer.h" SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { @@ -23,14 +24,18 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - char statePath[200]; + char statePath[300]; sprintf(statePath, "%s/%d", path, pTask->taskId); - if (tdbOpen(statePath, 16 * 1024, 1, &pState->db) < 0) { + if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) { goto _err; } // open state storage backend - if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pState->db, &pState->pStateDb) < 0) { + if (tdbTbOpen("state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pStateDb) < 0) { + goto _err; + } + + if (streamStateBegin(pState) < 0) { goto _err; } @@ -60,6 +65,7 @@ int32_t streamStateBegin(SStreamState* pState) { } if (tdbBegin(pState->db, &pState->txn) < 0) { + tdbTxnClose(&pState->txn); return -1; } return 0; @@ -95,33 +101,39 @@ int32_t streamStateAbort(SStreamState* pState) { return 0; } -int32_t streamStatePut(SStreamState* pState, const void* key, int32_t kLen, const void* value, int32_t vLen) { - return tdbTbUpsert(pState->pStateDb, key, kLen, value, vLen, &pState->txn); +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn); } -int32_t streamStateGet(SStreamState* pState, const void* key, int32_t kLen, void** pVal, int32_t* pVLen) { - return tdbTbGet(pState->pStateDb, key, kLen, pVal, pVLen); +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { + return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen); } -int32_t streamStateDel(SStreamState* pState, const void* key, int32_t kLen) { - return tdbTbDelete(pState->pStateDb, key, kLen, &pState->txn); +int32_t streamStateDel(SStreamState* pState, const SWinKey* key) { + return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn); } -SStreamStateCur* streamStateGetCur(SStreamState* pState, const void* key, int32_t kLen) { +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) { SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); if (pCur == NULL) return NULL; tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL); int32_t c; - tdbTbcMoveTo(pCur->pCur, key, kLen, &c); + tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c); if (c != 0) { taosMemoryFree(pCur); return NULL; } - return 0; + return pCur; } -int32_t streamGetKVByCur(SStreamStateCur* pCur, void** pKey, int32_t* pKLen, void** pVal, int32_t* pVLen) { - return tdbTbcGet(pCur->pCur, (const void**)pKey, pKLen, (const void**)pVal, pVLen); +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { + const SWinKey* pKTmp = NULL; + int32_t kLen; + if (tdbTbcGet(pCur->pCur, (const void**)&pKTmp, &kLen, pVal, pVLen) < 0) { + return -1; + } + *pKey = *pKTmp; + return 0; } int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) { @@ -134,14 +146,14 @@ int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) { return tdbTbcMoveToLast(pCur->pCur); } -SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const void* key, int32_t kLen) { +SStreamStateCur* 
streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) { SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); if (pCur == NULL) { return NULL; } int32_t c; - if (tdbTbcMoveTo(pCur->pCur, key, kLen, &c) < 0) { + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { taosMemoryFree(pCur); return NULL; } @@ -155,14 +167,14 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const void* key, i return pCur; } -SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const void* key, int32_t kLen) { +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) { SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); if (pCur == NULL) { return NULL; } int32_t c; - if (tdbTbcMoveTo(pCur->pCur, key, kLen, &c) < 0) { + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { taosMemoryFree(pCur); return NULL; } @@ -185,3 +197,9 @@ int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) { // return tdbTbcMoveToPrev(pCur->pCur); } +void streamStateFreeCur(SStreamStateCur* pCur) { + tdbTbcClose(pCur->pCur); + taosMemoryFree(pCur); +} + +void streamFreeVal(void* val) { tdbFree(val); } From 9f72a715bf0a4dc2881e197c0c440a3a35712037 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 24 Aug 2022 16:00:15 +0800 Subject: [PATCH 21/79] fix: typo coredump --- source/dnode/vnode/src/tsdb/tsdbCommit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 8d6e82d266..04a6de8472 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -969,7 +969,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData) { if (pRow == NULL) { if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) == 0) { SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid}; - code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx); + code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx); if (code) goto _err; if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { From b250efeee455f8151cdf4a4aa34df2facd2531e9 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 24 Aug 2022 17:05:38 +0800 Subject: [PATCH 22/79] fix: support window pseduo column for fill operator --- include/common/tcommon.h | 2 +- include/libs/nodes/querynodes.h | 4 +- source/libs/executor/src/executil.c | 198 +++++++++--------- source/libs/executor/src/executorimpl.c | 14 -- source/libs/executor/src/tfill.c | 5 + source/libs/executor/src/timewindowoperator.c | 4 +- source/libs/planner/src/planLogicCreater.c | 12 +- 7 files changed, 121 insertions(+), 118 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index dbe020f7ec..6f239a9a0c 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -181,7 +181,7 @@ typedef struct SColumn { int16_t slotId; char name[TSDB_COL_NAME_LEN]; - int8_t flag; // column type: normal column, tag, or user-input column (integer/float/string) + int16_t colType; // column type: normal column, tag, or window column int16_t type; int32_t bytes; uint8_t precision; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index cec6f1a691..3a1eaf289e 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -57,7 +57,9 @@ typedef enum EColumnType { 
COLUMN_TYPE_COLUMN = 1, COLUMN_TYPE_TAG, COLUMN_TYPE_TBNAME, - COLUMN_TYPE_WINDOW_PC, + COLUMN_TYPE_WINDOW_START, + COLUMN_TYPE_WINDOW_END, + COLUMN_TYPE_WINDOW_DURATION, COLUMN_TYPE_GROUP_KEY } EColumnType; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 4c694026cb..b326d5ab03 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -284,17 +284,17 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, return TSDB_CODE_SUCCESS; } -typedef struct tagFilterAssist{ - SHashObj *colHash; +typedef struct tagFilterAssist { + SHashObj* colHash; int32_t index; - SArray *cInfoList; -}tagFilterAssist; + SArray* cInfoList; +} tagFilterAssist; static EDealRes getColumn(SNode** pNode, void* pContext) { SColumnNode* pSColumnNode = NULL; if (QUERY_NODE_COLUMN == nodeType((*pNode))) { pSColumnNode = *(SColumnNode**)pNode; - }else if(QUERY_NODE_FUNCTION == nodeType((*pNode))){ + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); @@ -307,24 +307,26 @@ static EDealRes getColumn(SNode** pNode, void* pContext) { pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; nodesDestroyNode(*pNode); *pNode = (SNode*)pSColumnNode; - }else{ + } else { return DEAL_RES_CONTINUE; } - }else{ + } else { return DEAL_RES_CONTINUE; } - tagFilterAssist *pData = (tagFilterAssist *)pContext; - void *data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); - if(!data){ + tagFilterAssist* pData = (tagFilterAssist*)pContext; + void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + if (!data) { taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); pSColumnNode->slotId = pData->index++; - SColumnInfo cInfo = {.colId = pSColumnNode->colId, .type = pSColumnNode->node.resType.type, .bytes = pSColumnNode->node.resType.bytes}; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; #if TAG_FILTER_DEBUG qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type); #endif taosArrayPush(pData->cInfoList, &cInfo); - }else{ + } else { SColumnNode* col = *(SColumnNode**)data; pSColumnNode->slotId = col->slotId; } @@ -339,9 +341,9 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return terrno; } - pColumnData->info.type = pType->type; - pColumnData->info.bytes = pType->bytes; - pColumnData->info.scale = pType->scale; + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; pColumnData->info.precision = pType->precision; int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows); @@ -356,27 +358,27 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return TSDB_CODE_SUCCESS; } -static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond){ - int32_t code = TSDB_CODE_SUCCESS; - SArray* pBlockList = NULL; +static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond) { + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; SSDataBlock* pResBlock = 
NULL; - SHashObj * tags = NULL; + SHashObj* tags = NULL; SScalarParam output = {0}; tagFilterAssist ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - if(ctx.colHash == NULL){ + if (ctx.colHash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto end; } ctx.index = 0; ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - if(ctx.cInfoList == NULL){ + if (ctx.cInfoList == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto end; } - nodesRewriteExprPostOrder(&pTagCond, getColumn, (void *)&ctx); + nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); pResBlock = createDataBlock(); if (pResBlock == NULL) { @@ -390,7 +392,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray blockDataAppendColInfo(pResBlock, &colInfo); } -// int64_t stt = taosGetTimestampUs(); + // int64_t stt = taosGetTimestampUs(); tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); code = metaGetTableTags(metaHandle, suid, uidList, tags); if (code != TSDB_CODE_SUCCESS) { @@ -400,11 +402,11 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray } int32_t rows = taosArrayGetSize(uidList); - if(rows == 0){ + if (rows == 0) { goto end; } -// int64_t stt1 = taosGetTimestampUs(); -// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); code = blockDataEnsureCapacity(pResBlock, rows); if (code != TSDB_CODE_SUCCESS) { @@ -412,46 +414,46 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray goto end; } -// int64_t st = taosGetTimestampUs(); + // int64_t st = taosGetTimestampUs(); for (int32_t i = 0; i < rows; i++) { int64_t* uid = taosArrayGet(uidList, i); - for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){ + for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - if(pColInfo->info.colId == -1){ // tbname + if (pColInfo->info.colId == -1) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; metaGetTableNameByUid(metaHandle, *uid, str); colDataAppend(pColInfo, i, str, false); #if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2); + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); #endif - }else{ + } else { void* tag = taosHashGet(tags, uid, sizeof(int64_t)); ASSERT(tag); STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){ + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { colDataAppend(pColInfo, i, p, true); } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { colDataAppend(pColInfo, i, p, false); } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); varDataSetLen(tmp, tagVal.nData); memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); colDataAppend(pColInfo, i, tmp, false); #if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp+2); + qDebug("tagfilter varch:%s", tmp + 2); #endif taosMemoryFree(tmp); } else { colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); #if 
TAG_FILTER_DEBUG - if(pColInfo->info.type == TSDB_DATA_TYPE_INT){ + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){ - qDebug("tagfilter double:%f", *(double *)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); } #endif } @@ -460,8 +462,8 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray } pResBlock->info.rows = rows; -// int64_t st1 = taosGetTimestampUs(); -// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); pBlockList = taosArrayInit(2, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); @@ -474,12 +476,12 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray } code = scalarCalculate(pTagCond, pBlockList, &output); - if(code != TSDB_CODE_SUCCESS){ + if (code != TSDB_CODE_SUCCESS) { qError("failed to calculate scalar, reason:%s", tstrerror(code)); terrno = code; } -// int64_t st2 = taosGetTimestampUs(); -// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + // int64_t st2 = taosGetTimestampUs(); + // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); end: taosHashCleanup(tags); @@ -491,43 +493,43 @@ end: } static void releaseColInfoData(void* pCol) { - if(pCol){ - SColumnInfoData* col = (SColumnInfoData*) pCol; + if (pCol) { + SColumnInfoData* col = (SColumnInfoData*)pCol; colDataDestroy(col); taosMemoryFree(col); } } -int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo){ - int32_t code = TSDB_CODE_SUCCESS; - SArray *pBlockList = NULL; - SSDataBlock *pResBlock = NULL; - SHashObj *tags = NULL; - SArray *uidList = NULL; - void *keyBuf = NULL; - SArray *groupData = NULL; +int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) { + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; + SSDataBlock* pResBlock = NULL; + SHashObj* tags = NULL; + SArray* uidList = NULL; + void* keyBuf = NULL; + SArray* groupData = NULL; int32_t rows = taosArrayGetSize(pTableListInfo->pTableList); - if(rows == 0){ + if (rows == 0) { return TDB_CODE_SUCCESS; } tagFilterAssist ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - if(ctx.colHash == NULL){ + if (ctx.colHash == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto end; } ctx.index = 0; ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - if(ctx.cInfoList == NULL){ + if (ctx.cInfoList == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto end; } - SNode* pNode = NULL; + SNode* pNode = NULL; FOREACH(pNode, group) { - nodesRewriteExprPostOrder(&pNode, getColumn, (void *)&ctx); + nodesRewriteExprPostOrder(&pNode, getColumn, (void*)&ctx); REPLACE_NODE(pNode); } @@ -549,61 +551,61 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis taosArrayPush(uidList, &pkeyInfo->uid); } -// int64_t stt = taosGetTimestampUs(); + // int64_t stt = taosGetTimestampUs(); tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags); if (code != TSDB_CODE_SUCCESS) { goto end; } -// int64_t stt1 = taosGetTimestampUs(); -// qDebug("generate tag meta rows:%d, cost:%ld us", 
rows, stt1-stt); + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); code = blockDataEnsureCapacity(pResBlock, rows); if (code != TSDB_CODE_SUCCESS) { goto end; } -// int64_t st = taosGetTimestampUs(); + // int64_t st = taosGetTimestampUs(); for (int32_t i = 0; i < rows; i++) { int64_t* uid = taosArrayGet(uidList, i); - for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){ + for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - if(pColInfo->info.colId == -1){ // tbname + if (pColInfo->info.colId == -1) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; metaGetTableNameByUid(metaHandle, *uid, str); colDataAppend(pColInfo, i, str, false); #if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2); + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); #endif - }else{ + } else { void* tag = taosHashGet(tags, uid, sizeof(int64_t)); ASSERT(tag); STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){ + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { colDataAppend(pColInfo, i, p, true); } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { colDataAppend(pColInfo, i, p, false); } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); varDataSetLen(tmp, tagVal.nData); memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); colDataAppend(pColInfo, i, tmp, false); #if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp+2); + qDebug("tagfilter varch:%s", tmp + 2); #endif taosMemoryFree(tmp); } else { colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); #if TAG_FILTER_DEBUG - if(pColInfo->info.type == TSDB_DATA_TYPE_INT){ + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){ - qDebug("tagfilter double:%f", *(double *)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); } #endif } @@ -612,8 +614,8 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis } pResBlock->info.rows = rows; -// int64_t st1 = taosGetTimestampUs(); -// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); pBlockList = taosArrayInit(2, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); @@ -627,7 +629,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis break; case QUERY_NODE_COLUMN: case QUERY_NODE_OPERATOR: - case QUERY_NODE_FUNCTION:{ + case QUERY_NODE_FUNCTION: { SExprNode* expNode = (SExprNode*)pNode; code = createResultData(&expNode->resType, rows, &output); if (code != TSDB_CODE_SUCCESS) { @@ -639,16 +641,16 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis code = TSDB_CODE_OPS_NOT_SUPPORT; goto end; } - if(nodeType(pNode) == QUERY_NODE_COLUMN){ - SColumnNode* pSColumnNode = (SColumnNode*)pNode; + if (nodeType(pNode) == QUERY_NODE_COLUMN) { + 
SColumnNode* pSColumnNode = (SColumnNode*)pNode; SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, pSColumnNode->slotId); code = colDataAssign(output.columnData, pColInfo, rows, NULL); - }else if(nodeType(pNode) == QUERY_NODE_VALUE){ + } else if (nodeType(pNode) == QUERY_NODE_VALUE) { continue; - }else{ + } else { code = scalarCalculate(pNode, pBlockList, &output); } - if(code != TSDB_CODE_SUCCESS){ + if (code != TSDB_CODE_SUCCESS) { releaseColInfoData(output.columnData); goto end; } @@ -656,7 +658,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis } int32_t keyLen = 0; - SNode* node; + SNode* node; FOREACH(node, group) { SExprNode* pExpr = (SExprNode*)node; keyLen += pExpr->resType.bytes; @@ -670,12 +672,12 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis code = TSDB_CODE_OUT_OF_MEMORY; goto end; } - for(int i = 0; i < rows; i++){ + for (int i = 0; i < rows; i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); char* isNull = (char*)keyBuf; char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(group); - for(int j = 0; j < taosArrayGetSize(groupData); j++){ + for (int j = 0; j < taosArrayGetSize(groupData); j++) { SColumnInfoData* pValue = (SColumnInfoData*)taosArrayGetP(groupData, j); if (colDataIsNull_s(pValue, i)) { @@ -688,7 +690,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis code = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR; goto end; } - if(tTagIsJsonNull(data)){ + if (tTagIsJsonNull(data)) { isNull[j] = 1; continue; } @@ -710,10 +712,10 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); } -// int64_t st2 = taosGetTimestampUs(); -// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + // int64_t st2 = taosGetTimestampUs(); + // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); - end: +end: taosMemoryFreeClear(keyBuf); taosHashCleanup(tags); taosHashCleanup(ctx.colHash); @@ -743,7 +745,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SIndexMetaArg metaArg = { .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; -// int64_t stt = taosGetTimestampUs(); + // int64_t stt = taosGetTimestampUs(); SIdxFltStatus status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, res, &status); if (code != 0 || status == SFLT_NOT_INDEX) { @@ -751,20 +753,20 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, code = TDB_CODE_SUCCESS; } -// int64_t stt1 = taosGetTimestampUs(); -// qDebug("generate table list, cost:%ld us", stt1-stt); - }else if(!pTagCond){ + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate table list, cost:%ld us", stt1-stt); + } else if (!pTagCond) { vnodeGetCtbIdList(pVnode, pScanNode->suid, res); } } else { // Create one table group. 
- if(metaIsTableExist(metaHandle, tableUid)){ + if (metaIsTableExist(metaHandle, tableUid)) { taosArrayPush(res, &tableUid); } } if (pTagCond) { SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond); - if(terrno != TDB_CODE_SUCCESS){ + if (terrno != TDB_CODE_SUCCESS) { colDataDestroy(pColInfoData); taosMemoryFreeClear(pColInfoData); taosArrayDestroy(res); @@ -826,7 +828,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) { int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) { SMetaReader mr = {0}; metaReaderInit(&mr, pMeta, 0); - if(metaGetTableEntryByUid(&mr, uid) != 0){ // table not exist + if (metaGetTableEntryByUid(&mr, uid) != 0) { // table not exist metaReaderClear(&mr); return TSDB_CODE_PAR_TABLE_NOT_EXIST; } @@ -984,7 +986,7 @@ static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, i return s; } -static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) { +static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType, EColumnType colType) { SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn)); if (pCol == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -998,7 +1000,7 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa pCol->scale = pType->scale; pCol->precision = pType->precision; pCol->dataBlockId = blockId; - + pCol->colType = colType; return pCol; } @@ -1042,7 +1044,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* SDataType* pType = &pColNode->node.resType; pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, pType->precision, pColNode->colName); - pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType); + pExp->base.pParam[0].pCol = + createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType); pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN; } else if (type == QUERY_NODE_VALUE) { pExp->pExpr->nodeType = QUERY_NODE_VALUE; @@ -1094,7 +1097,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* SColumnNode* pcn = (SColumnNode*)p1; pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN; - pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType); + pExp->base.pParam[j].pCol = + createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType); } else if (p1->type == QUERY_NODE_VALUE) { SValueNode* pvn = (SValueNode*)p1; pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 1836ca6d9b..6b52031c31 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -148,20 +148,6 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size); static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId); -// setup the output buffer for each operator -static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) { - if (TSDB_COL_IS_TAG(pColumn->flag) || TSDB_COL_IS_UD_COL(pColumn->flag) || - pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { - return false; - } - - if (pStatis != NULL && pStatis->numOfNull == 0) { - return false; - } - - 
return true; -} - #if 0 static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData, int16_t bytes, bool masterscan, uint64_t uid) { diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 59dd58070d..6d25a49847 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -76,6 +76,11 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32 } } +static bool fillWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) { + //fill windows pseduo column, _wstart, _wend, _wduration and return true + return false; +} + static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* pSrcBlock, int64_t ts, bool outOfBound) { SPoint point1, point2, point; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 551180f639..6fc09744e7 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1918,8 +1918,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator if (gid != pRowSup->groupId || pInfo->winSup.prevTs == INT64_MIN) { doKeepNewWindowStartInfo(pRowSup, tsList, j, gid); doKeepTuple(pRowSup, tsList[j], gid); - } else if ((tsList[j] - pRowSup->prevTs >= 0) && tsList[j] - pRowSup->prevTs <= gap || - (pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap)) { + } else if (((tsList[j] - pRowSup->prevTs >= 0) && (tsList[j] - pRowSup->prevTs <= gap)) || + ((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) { // The gap is less than the threshold, so it belongs to current session window that has been opened already. 
doKeepTuple(pRowSup, tsList[j], gid); if (j == 0 && pRowSup->startRowIndex != 0) { diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 71f084d412..0667c5f5b9 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -44,12 +44,15 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) { pCol->colType = COLUMN_TYPE_TBNAME; break; case FUNCTION_TYPE_WSTART: + pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; + pCol->colType = COLUMN_TYPE_WINDOW_START; + break; case FUNCTION_TYPE_WEND: pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; - pCol->colType = COLUMN_TYPE_WINDOW_PC; + pCol->colType = COLUMN_TYPE_WINDOW_END; break; case FUNCTION_TYPE_WDURATION: - pCol->colType = COLUMN_TYPE_WINDOW_PC; + pCol->colType = COLUMN_TYPE_WINDOW_DURATION; break; case FUNCTION_TYPE_GROUP_KEY: pCol->colType = COLUMN_TYPE_GROUP_KEY; @@ -784,7 +787,10 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele static EDealRes needFillValueImpl(SNode* pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (COLUMN_TYPE_WINDOW_PC != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { + if (COLUMN_TYPE_WINDOW_START != pCol->colType && + COLUMN_TYPE_WINDOW_END != pCol->colType && + COLUMN_TYPE_WINDOW_DURATION != pCol->colType && + COLUMN_TYPE_GROUP_KEY != pCol->colType) { *(bool*)pContext = true; return DEAL_RES_END; } From cf1c6346d5351277b049fb1ce71c0357f0be4bb7 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Wed, 24 Aug 2022 17:09:12 +0800 Subject: [PATCH 23/79] build: cmake warning error --- cmake/cmake.define | 2 ++ packaging/release.bat | 18 ++++-------------- source/client/src/clientEnv.c | 2 ++ source/os/src/osFile.c | 5 +++-- source/os/src/osSysinfo.c | 7 +++---- 5 files changed, 14 insertions(+), 20 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index 5d64815a9a..376a55d396 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -2,6 +2,8 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE OFF) +SET(BUILD_SHARED_LIBS "OFF") + #set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin) diff --git a/packaging/release.bat b/packaging/release.bat index ffd3a68048..591227382f 100644 --- a/packaging/release.bat +++ b/packaging/release.bat @@ -40,10 +40,12 @@ if not exist %work_dir%\debug\ver-%2-x86 ( ) cd %work_dir%\debug\ver-%2-x64 call vcvarsall.bat x64 -cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x64 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64 cmake --build . rd /s /Q C:\TDengine cmake --install . 
+for /r c:\TDengine %%i in (*.dll) do signtool sign /f D:\\123.pfx /p taosdata %%i +for /r c:\TDengine %%i in (*.exe) do signtool sign /f D:\\123.pfx /p taosdata %%i if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1) cd %package_dir% iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release @@ -51,19 +53,7 @@ if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% faile iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1) -cd %work_dir%\debug\ver-%2-x86 -call vcvarsall.bat x86 -cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x86 -cmake --build . -rd /s /Q C:\TDengine -cmake --install . -if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1) -cd %package_dir% -@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release -@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1) -iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release -if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1) - +for /r ..\release %%i in (*.exe) do signtool sign /f d:\\123.pfx /p taosdata %%i goto EXIT0 :USAGE diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index ff1b9322c9..99ecab9642 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -393,7 +393,9 @@ void taos_init_imp(void) { schedulerInit(); tscDebug("starting to initialize TAOS driver"); +#ifndef WINDOWS taosSetCoreDump(true); +#endif initTaskQueue(); fmFuncMgtInit(); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 2d9cfe3246..f9797f6319 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -203,10 +203,11 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { } int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { - struct stat fileStat; #ifdef WINDOWS - int32_t code = _stat(path, &fileStat); + struct _stati64 fileStat; + int32_t code = _stati64(path, &fileStat); #else + struct stat fileStat; int32_t code = stat(path, &fileStat); #endif if (code < 0) { diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 3a75e18a7f..3aa3f4f29e 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -851,13 +851,12 @@ char *taosGetCmdlineByPID(int pid) { } void taosSetCoreDump(bool enable) { + if (!enable) return; #ifdef WINDOWS - // SetUnhandledExceptionFilter(exceptionHandler); - // SetUnhandledExceptionFilter(&FlCrashDump); + SetUnhandledExceptionFilter(exceptionHandler); + SetUnhandledExceptionFilter(&FlCrashDump); #elif defined(_TD_DARWIN_64) #else - if (!enable) return; - // 1. set ulimit -c unlimited struct rlimit rlim; struct rlimit rlim_new; From a23f5f59eafbffe78147c766583cad2aa5f78564 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 24 Aug 2022 17:09:33 +0800 Subject: [PATCH 24/79] refactor(query): do some internal refactor. 
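In brief (as reflected in the diff below, not an exhaustive changelog): rename
getValidRow to getValidMemRow and drop the temporary _Rv suffixes
(doMergeBufAndFileRows_Rv -> doMergeBufAndFileRows, doMergeMultiLevelRowsRv ->
doMergeMultiLevelRows); factor the shared last-block merge path into
doMergeFileBlockAndLastBlock; remove blocks of commented-out legacy code and the
unused doSetTagValueToResultBuf helper; and replace bare longjmp() calls across the
executor operators with the T_LONG_JMP macro added to executil.h, which asserts
that the jump code is not -1 before jumping:

    #define T_LONG_JMP(_obj, _c) \
      do {                       \
        ASSERT((_c) != -1);      \
        longjmp((_obj), (_c));   \
      } while (0);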
--- source/dnode/vnode/src/tsdb/tsdbRead.c | 265 +++++------------- source/libs/executor/inc/executil.h | 6 + source/libs/executor/src/executorimpl.c | 62 +--- source/libs/executor/src/groupoperator.c | 16 +- source/libs/executor/src/projectoperator.c | 10 +- source/libs/executor/src/scanoperator.c | 32 +-- source/libs/executor/src/sortoperator.c | 14 +- source/libs/executor/src/timewindowoperator.c | 46 +-- 8 files changed, 150 insertions(+), 301 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0b027367da..cd40a9acc2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -131,6 +131,7 @@ typedef struct SFileBlockDumpInfo { typedef struct SReaderStatus { bool loadFromFile; // check file stage + bool composedDataBlock; // the returned data block is a composed block or not SHashObj* pTableMap; // SHash STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks. SFileBlockDumpInfo fBlockDumpInfo; @@ -138,7 +139,6 @@ typedef struct SReaderStatus { SBlockData fileBlockData; SFilesetIter fileIter; SDataBlockIter blockIter; - bool composedDataBlock; // the returned data block is a composed block or not } SReaderStatus; struct STsdbReader { @@ -166,7 +166,7 @@ struct STsdbReader { static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter); static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity, STsdbReader* pReader); -static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); +static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader, SRowMerger* pMerger); static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger); @@ -513,86 +513,6 @@ _end: return code; } -// void tsdbResetQueryHandleForNewTable(STsdbReader* queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList, -// int32_t tWinIdx) { -// STsdbReader* pTsdbReadHandle = queryHandle; - -// pTsdbReadHandle->order = pCond->order; -// pTsdbReadHandle->window = pCond->twindows[tWinIdx]; -// pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; -// pTsdbReadHandle->cur.fid = -1; -// pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; -// pTsdbReadHandle->checkFiles = true; -// pTsdbReadHandle->activeIndex = 0; // current active table index -// pTsdbReadHandle->locateStart = false; -// pTsdbReadHandle->loadExternalRow = pCond->loadExternalRows; - -// if (ASCENDING_TRAVERSE(pCond->order)) { -// assert(pTsdbReadHandle->window.skey <= pTsdbReadHandle->window.ekey); -// } else { -// assert(pTsdbReadHandle->window.skey >= pTsdbReadHandle->window.ekey); -// } - -// // allocate buffer in order to load data blocks from file -// memset(pTsdbReadHandle->suppInfo.pstatis, 0, sizeof(SColumnDataAgg)); -// memset(pTsdbReadHandle->suppInfo.plist, 0, POINTER_BYTES); - -// tsdbInitDataBlockLoadInfo(&pTsdbReadHandle->dataBlockLoadInfo); -// tsdbInitCompBlockLoadInfo(&pTsdbReadHandle->compBlockLoadInfo); - -// SArray* pTable = NULL; -// // STsdbMeta* pMeta = tsdbGetMeta(pTsdbReadHandle->pTsdb); - -// // pTsdbReadHandle->pTableCheckInfo = destroyTableCheckInfo(pTsdbReadHandle->pTableCheckInfo); - -// pTsdbReadHandle->pTableCheckInfo = NULL; // createDataBlockScanInfo(pTsdbReadHandle, groupList, 
pMeta, -// // &pTable); -// if (pTsdbReadHandle->pTableCheckInfo == NULL) { -// // tsdbReaderClose(pTsdbReadHandle); -// terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; -// } - -// // pTsdbReadHandle->prev = doFreeColumnInfoData(pTsdbReadHandle->prev); -// // pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next); -// } - -// SArray* tsdbGetQueriedTableList(STsdbReader** pHandle) { -// assert(pHandle != NULL); - -// STsdbReader* pTsdbReadHandle = (STsdbReader*)pHandle; - -// size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); -// SArray* res = taosArrayInit(size, POINTER_BYTES); -// return res; -// } - -// static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { -// int32_t firstSlot = 0; -// int32_t lastSlot = numOfBlocks - 1; - -// int32_t midSlot = firstSlot; - -// while (1) { -// numOfBlocks = lastSlot - firstSlot + 1; -// midSlot = (firstSlot + (numOfBlocks >> 1)); - -// if (numOfBlocks == 1) break; - -// if (skey > pBlock[midSlot].maxKey.ts) { -// if (numOfBlocks == 2) break; -// if ((order == TSDB_ORDER_DESC) && (skey < pBlock[midSlot + 1].minKey.ts)) break; -// firstSlot = midSlot + 1; -// } else if (skey < pBlock[midSlot].minKey.ts) { -// if ((order == TSDB_ORDER_ASC) && (skey > pBlock[midSlot - 1].maxKey.ts)) break; -// lastSlot = midSlot - 1; -// } else { -// break; // got the slot -// } -// } - -// return midSlot; -// } - static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) { SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx)); @@ -861,71 +781,32 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) { int64_t st = taosGetTimestampUs(); - double elapsedTime = 0; - int32_t code = 0; SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + ASSERT(pBlockInfo != NULL); - if (pBlockInfo != NULL) { - SBlock* pBlock = getCurrentBlock(pBlockIter); - code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData); - if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 - ", rows:%d, code:%s %s", - pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, - tstrerror(code), pReader->idStr); - goto _error; - } - - elapsedTime = (taosGetTimestampUs() - st) / 1000.0; - - tsdbDebug("%p load file block into buffer, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 - ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", + SBlock* pBlock = getCurrentBlock(pBlockIter); + int32_t code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 + ", rows:%d, code:%s %s", pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, - pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); - } else { -#if 0 - SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader; - - uint64_t uid = pBlockInfo->uid; - SArray* pBlocks = pLastBlockReader->pBlockL; - - pLastBlockReader->currentBlockIndex = -1; - - // find the correct SBlockL - for(int32_t i = 0; i < taosArrayGetSize(pBlocks); ++i) { - 
SBlockL* pBlock = taosArrayGet(pBlocks, i); - if (pBlock->minUid >= uid && pBlock->maxUid <= uid) { - pLastBlockReader->currentBlockIndex = i; - break; - } - } - -// SBlockL* pBlockL = taosArrayGet(pLastBlockReader->pBlockL, *index); - code = tsdbReadLastBlock(pReader->pFileReader, pBlockL, pBlockData); - if (code != TSDB_CODE_SUCCESS) { - tsdbDebug("%p error occurs in loading last block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64 - ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", code:%s %s", - pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow, - pBlockL->minVer, pBlockL->maxVer, tstrerror(code), pReader->idStr); - goto _error; - } - - tsdbDebug("%p load last file block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64 - ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", - pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow, - pBlockL->minVer, pBlockL->maxVer, elapsedTime, pReader->idStr); -#endif + tstrerror(code), pReader->idStr); + return code; } + double elapsedTime = (taosGetTimestampUs() - st) / 1000.0; + + tsdbDebug("%p load file block into buffer, global index:%d, index in table block list:%d, brange:%" PRId64 "-%" PRId64 + ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", + pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, + pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); + pReader->cost.blockLoadTime += elapsedTime; pDumpInfo->allDumped = false; return TSDB_CODE_SUCCESS; - -_error: - return code; } static void cleanupBlockOrderSupporter(SBlockOrderSupporter* pSup) { @@ -979,10 +860,10 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v } static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) { - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - if (pFBlock != NULL) { - STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); - int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx); + SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); + if (pBlockInfo != NULL) { + STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); + int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pBlockInfo->tbBlockIdx); tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock); } @@ -1396,7 +1277,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* return pReader->pMemSchema; } -static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow, +static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow, SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) { SRowMerger merge = {0}; STSRow* pTSRow = NULL; @@ -1512,6 +1393,33 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf return TSDB_CODE_SUCCESS; } +static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, STsdbReader* pReader, + STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, + bool mergeBlockData) { + SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; + int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader); + + STSRow* pTSRow = NULL; + SRowMerger merge = 
{0}; + + TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); + + tRowMergerInit(&merge, &fRow, pReader->pSchema); + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge); + + // merge with block data if ts == key + if (mergeBlockData) { + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + } + + tRowMergerGetRow(&merge, &pTSRow); + doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); + + taosMemoryFree(pTSRow); + tRowMergerClear(&merge); + return TSDB_CODE_SUCCESS; +} + static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader* pLastBlockReader, int64_t key, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) { SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; @@ -1549,55 +1457,23 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader return TSDB_CODE_SUCCESS; } } else { // desc order - SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; - TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); - - STSRow* pTSRow = NULL; - SRowMerger merge = {0}; - tRowMergerInit(&merge, &fRow1, pReader->pSchema); - doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge); - - if (ts == key) { - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); - } - - tRowMergerGetRow(&merge, &pTSRow); - doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); - - taosMemoryFree(pTSRow); - tRowMergerClear(&merge); - return TSDB_CODE_SUCCESS; + return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true); } } else { // only last block exists - SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; - int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader); - - STSRow* pTSRow = NULL; - SRowMerger merge = {0}; - - TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); - - tRowMergerInit(&merge, &fRow, pReader->pSchema); - doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge); - tRowMergerGetRow(&merge, &pTSRow); - - doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); - - taosMemoryFree(pTSRow); - tRowMergerClear(&merge); - return TSDB_CODE_SUCCESS; + return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); } } -static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) { +static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, + SLastBlockReader* pLastBlockReader) { SRowMerger merge = {0}; STSRow* pTSRow = NULL; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; SArray* pDelList = pBlockScanInfo->delSkyline; - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader); + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader); ASSERT(pRow != NULL && piRow != NULL); SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; @@ -1611,7 +1487,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo TSDBKEY k = TSDBROW_KEY(pRow); TSDBKEY ik = TSDBROW_KEY(piRow); - int64_t minKey = 0;//INT64_MAX; + int64_t minKey = 0; 
if (ASCENDING_TRAVERSE(pReader->order)) { minKey = INT64_MAX; // let's find the minimum if (minKey > k.ts) { @@ -1748,8 +1624,8 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; SArray* pDelList = pBlockScanInfo->delSkyline; - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader); + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader); ASSERT(pRow != NULL && piRow != NULL); int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex]; @@ -2024,20 +1900,20 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; int64_t key = (pBlockData->nRow > 0)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN; - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) { - return doMergeMultiLevelRowsRv(pReader, pBlockScanInfo, pBlockData, pLastBlockReader); + return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader); } else { // imem + file + last block if (pBlockScanInfo->iiter.hasVal) { - return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader); + return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader); } // mem + file + last block if (pBlockScanInfo->iter.hasVal) { - return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader); + return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader); } // files data blocks + last block @@ -2270,12 +2146,12 @@ static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* p TSDBKEY key = {.ts = TSKEY_INITIAL_VAL}; initMemDataIterator(pScanInfo, pReader); - TSDBROW* pRow = getValidRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader); + TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader); if (pRow != NULL) { key = TSDBROW_KEY(pRow); } - pRow = getValidRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader); + pRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader); if (pRow != NULL) { TSDBKEY k = TSDBROW_KEY(pRow); if (key.ts > k.ts) { @@ -2861,7 +2737,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32 return false; } -TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) { +TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) { if (!pIter->hasVal) { return NULL; } @@ -2909,7 +2785,7 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe } // data exists but not valid - TSDBROW* pRow = getValidRow(pIter, pDelList, pReader); + TSDBROW* pRow = getValidMemRow(pIter, pDelList, pReader); if (pRow == NULL) { break; } @@ -3033,7 +2909,6 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc 
return TSDB_CODE_SUCCESS; } -// todo check if the rows are dropped or not int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger) { while(nextRowInLastBlock(pLastBlockReader, pScanInfo)) { int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader); @@ -3061,7 +2936,7 @@ void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SAr *freeTSRow = false; return; } else { // has next point in mem/imem - pNextRow = getValidRow(pIter, pDelList, pReader); + pNextRow = getValidMemRow(pIter, pDelList, pReader); if (pNextRow == NULL) { *pTSRow = current.pTSRow; *freeTSRow = false; @@ -3127,8 +3002,8 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey, bool* freeTSRow) { - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); SArray* pDelList = pBlockScanInfo->delSkyline; uint64_t uid = pBlockScanInfo->uid; diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index f4c42023c8..e287bcc882 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -23,6 +23,12 @@ #include "tcommon.h" #include "tpagedbuf.h" +#define T_LONG_JMP(_obj, _c) \ + do { \ + ASSERT((_c) != -1); \ + longjmp((_obj), (_c)); \ + } while (0); + #define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \ do { \ assert(sizeof(_uid) == sizeof(uint64_t)); \ diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index c3f1c8fbf6..6aaa2eb0c7 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -76,12 +76,6 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define realloc u_realloc #endif -#define T_LONG_JMP(_obj, _c) \ - do { \ - assert((_c) != -1); \ - longjmp((_obj), (_c)); \ - } while (0); - #define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) @@ -92,9 +86,7 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) { return 0; } -static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes); - -static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock); +static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pBlock); static void releaseQueryBuf(size_t numOfTables); @@ -278,9 +270,6 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // 1. 
close current opened time window if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId))) { -#ifdef BUF_PAGE_DEBUG - qDebug("page_1"); -#endif SResultRowPosition pos = pResultRowInfo->cur; SFilePage* pPage = getBufPage(pResultBuf, pos.pageId); releaseBufPage(pResultBuf, pPage); @@ -308,7 +297,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // too many time window in query if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH && taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } return pResult; @@ -434,7 +423,7 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfo if (code != TSDB_CODE_SUCCESS) { qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code)); taskInfo->code = code; - longjmp(taskInfo->env, code); + T_LONG_JMP(taskInfo->env, code); } } @@ -1152,7 +1141,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } } } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery)) { // stable aggregate, not interval aggregate or normal column aggregate @@ -1203,7 +1192,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } } } @@ -1495,7 +1484,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi if (TAOS_FAILED(code)) { releaseBufPage(pBuf, page); qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -1507,7 +1496,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(code)) { qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { // do nothing, todo refactor @@ -1581,7 +1570,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(code)) { qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { // do nothing, todo refactor @@ -1736,7 +1725,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; // while 
(tsdbNextDataBlock(pTsdbReadHandle)) { // if (isTaskKilled(pRuntimeEnv->qinfo)) { -// longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); +// T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); // } // // tsdbRetrieveDataBlockInfo(pTsdbReadHandle, &blockInfo); @@ -1755,7 +1744,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // } // // if (terrno != TSDB_CODE_SUCCESS) { -// longjmp(pRuntimeEnv->env, terrno); +// T_LONG_JMP(pRuntimeEnv->env, terrno); // } // } @@ -1919,7 +1908,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // // // check for error // if (terrno != TSDB_CODE_SUCCESS) { -// longjmp(pRuntimeEnv->env, terrno); +// T_LONG_JMP(pRuntimeEnv->env, terrno); // } // // return true; @@ -2771,7 +2760,7 @@ static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pOperator->status = OP_RES_TO_RETURN; @@ -2966,7 +2955,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // there is an scalar expression that needs to be calculated before apply the group aggregation. @@ -2974,7 +2963,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { SExprSupp* pSup1 = &pAggInfo->scalarExprSup; code = projectApplyFunctions(pSup1->pExprInfo, pBlock, pBlock, pSup1->pCtx, pSup1->numOfExprs, NULL); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -2983,7 +2972,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true); code = doAggregateImpl(pOperator, pSup->pCtx); if (code != 0) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -4673,27 +4662,6 @@ void doDestroyTask(SExecTaskInfo* pTaskInfo) { taosMemoryFreeClear(pTaskInfo); } -static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) { - if (val == NULL) { - setNull(output, type, bytes); - return; - } - - if (IS_VAR_DATA_TYPE(type)) { - // Binary data overflows for sort of unknown reasons. Let trim the overflow data - if (varDataTLen(val) > bytes) { - int32_t maxLen = bytes - VARSTR_HEADER_SIZE; - int32_t len = (varDataLen(val) > maxLen) ? 
maxLen : varDataLen(val); - memcpy(varDataVal(output), varDataVal(val), len); - varDataSetLen(output, len); - } else { - varDataCopy(output, val); - } - } else { - memcpy(output, val, bytes); - } -} - static int64_t getQuerySupportBufSize(size_t numOfTables) { size_t s1 = sizeof(STableQueryInfo); // size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 05dffc658b..ab2326ecae 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -247,7 +247,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { if (!pInfo->isInit) { recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); if (terrno != TSDB_CODE_SUCCESS) { // group by json error - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pInfo->isInit = true; num++; @@ -265,7 +265,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { num++; recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); if (terrno != TSDB_CODE_SUCCESS) { // group by json error - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } continue; } @@ -273,7 +273,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals); int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } int32_t rowIndex = j - num; @@ -291,7 +291,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } int32_t rowIndex = pBlock->info.rows - num; @@ -350,7 +350,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // the pDataBlock are always the same one, no need to call this again @@ -360,7 +360,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { if (pInfo->scalarSup.pExprInfo != NULL) { pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL); if (pTaskInfo->code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, pTaskInfo->code); + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); } } @@ -678,14 +678,14 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { if (pInfo->scalarSup.pExprInfo != NULL) { pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL); if (pTaskInfo->code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, pTaskInfo->code); + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); } } terrno = TSDB_CODE_SUCCESS; doHashPartition(pOperator, pBlock); if (terrno != TSDB_CODE_SUCCESS) { // group by json error - 
longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 94da3e23e1..b0ca219d52 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -268,7 +268,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again int32_t code = getTableScanInfo(downstream, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false); @@ -277,7 +277,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, pProjectInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } status = doIngroupLimitOffset(pLimitInfo, pBlock->info.groupId, pInfo->pRes, pOperator); @@ -415,7 +415,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp // the pDataBlock are always the same one, no need to call this again int32_t code = getTableScanInfo(downstream, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // there is an scalar expression that needs to be calculated before apply the group aggregation. @@ -424,7 +424,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp code = projectApplyFunctions(pScalarSup->pExprInfo, pBlock, pBlock, pScalarSup->pCtx, pScalarSup->numOfExprs, pIndefInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -434,7 +434,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, pIndefInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 7b13aa8ad8..ec902588e3 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -250,7 +250,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } if (!allColumnsHaveAgg) { @@ -264,7 +264,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, if (pBlock->pBlockAgg == NULL) { pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES); if (pBlock->pBlockAgg == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); } } @@ -374,7 +374,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -495,7 +495,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { while 
(tsdbNextDataBlock(pTableScanInfo->dataReader)) { if (isTaskKilled(pTaskInfo)) { - longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); } // process this data block based on the probabilities @@ -523,7 +523,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status); // int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status); if (code != TSDB_CODE_SUCCESS) { - longjmp(pOperator->pTaskInfo->env, code); + T_LONG_JMP(pOperator->pTaskInfo->env, code); } // current block is filter out according to filter condition, continue load the next block @@ -649,7 +649,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); return NULL; } } @@ -837,7 +837,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { int32_t code = doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid, &blockDistInfo.rowSize, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo); @@ -1259,7 +1259,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { blockDataFreeRes((SSDataBlock*)pBlock); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -1950,7 +1950,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { metaReaderClear(&smr); metaCloseTbCursor(pInfo->pCur); pInfo->pCur = NULL; - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; @@ -2153,7 +2153,7 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) { metaReaderClear(&mr); metaCloseTbCursor(pInfo->pCur); pInfo->pCur = NULL; - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } // number of columns @@ -2527,7 +2527,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno), GET_TASKID(pTaskInfo)); metaReaderClear(&mr); - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { @@ -2777,7 +2777,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo, pTableScanInfo->pseudoSup.numOfExprs, pBlock, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -2820,7 +2820,7 @@ static SSDataBlock* getTableDataBlock(void* param) { STsdbReader* reader = taosArrayGetP(pTableScanInfo->dataReaders, readerIdx); while (tsdbNextDataBlock(reader)) { if (isTaskKilled(pOperator->pTaskInfo)) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); + T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); } // process this data block based on the probabilities @@ -2843,7 +2843,7 @@ static SSDataBlock* getTableDataBlock(void* param) { int32_t 
code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, readerIdx, pBlock, &status); // int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status); if (code != TSDB_CODE_SUCCESS) { - longjmp(pOperator->pTaskInfo->env, code); + T_LONG_JMP(pOperator->pTaskInfo->env, code); } // current block is filter out according to filter condition, continue load the next block @@ -2936,7 +2936,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } return TSDB_CODE_SUCCESS; @@ -3006,7 +3006,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); if (!pInfo->hasGroupId) { diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 4dd5e4ec15..dbaba98914 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -156,7 +156,7 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) { int32_t code = projectApplyFunctions(pOperator->exprSupp.pExprInfo, pBlock, pBlock, pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL); if (code != TSDB_CODE_SUCCESS) { - longjmp(pOperator->pTaskInfo->env, code); + T_LONG_JMP(pOperator->pTaskInfo->env, code); } } } @@ -184,7 +184,7 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) { taosMemoryFreeClear(ps); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0; @@ -204,7 +204,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } SSDataBlock* pBlock = NULL; @@ -388,7 +388,7 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) { taosMemoryFreeClear(ps); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } return TSDB_CODE_SUCCESS; @@ -420,7 +420,7 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } if (!pInfo->hasGroupId) { @@ -575,7 +575,7 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0; @@ -672,7 +672,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } SSDataBlock* pBlock = getMultiwaySortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 2648e368b2..3769c57bf3 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -628,7 +628,7 @@ static void 
doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); @@ -952,7 +952,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { @@ -975,7 +975,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // window start key interpolation @@ -1009,7 +1009,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { @@ -1185,7 +1185,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false); @@ -1210,7 +1210,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); @@ -1928,7 +1928,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, 
TSDB_CODE_QRY_APP_ERROR); } // pInfo->numOfRows data belong to the current session window @@ -1947,7 +1947,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); @@ -2335,7 +2335,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { int32_t code = initKeeperInfo(pSliceInfo, pBlock); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // the pDataBlock are always the same one, no need to call this again @@ -2783,7 +2783,7 @@ void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int3 if (code != TSDB_CODE_SUCCESS) { qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code)); pTaskInfo->code = code; - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } } @@ -2937,7 +2937,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, true, &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (IS_FINAL_OP(pInfo)) { @@ -3189,7 +3189,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { for (int32_t i = 0; i < chIndex + 1 - size; i++) { SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0); if (!pChildOp) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info; pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; @@ -3732,7 +3732,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes // too many time window in query int32_t size = taosArrayGetSize(pAggSup->pCurWins); if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH && size > MAX_INTERVAL_TIME_WINDOW) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } if (pWinInfo->pos.pageId == -1) { @@ -3884,7 +3884,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData pStDeleted); code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } int32_t winNum = getNumCompactWindow(pAggSup->pCurWins, winIndex, gap); @@ -3896,7 +3896,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData SWinRes value = {.ts = pCurWin->win.skey, .groupId = groupId}; code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, 
TSDB_CODE_QRY_OUT_OF_MEMORY); } pCurWin->isOutput = true; } @@ -4205,7 +4205,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { SOperatorInfo* pChildOp = createStreamFinalSessionAggOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0); if (!pChildOp) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } taosArrayPush(pInfo->pChildren, &pChildOp); } @@ -4668,14 +4668,14 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl } code = doOneStateWindowAgg(pInfo, pSDataBlock, &pCurWin->winInfo, &pResult, i, winRows, numOfOutput, pOperator); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } pCurWin->winInfo.isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { SWinRes value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } pCurWin->winInfo.isOutput = true; } @@ -4921,7 +4921,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } int32_t currPos = startPos; @@ -4948,7 +4948,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } miaInfo->curTs = currWin.skey; @@ -5223,7 +5223,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } TSKEY ekey = ascScan ? 
win.ekey : win.skey; @@ -5240,7 +5240,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // window start key interpolation @@ -5269,7 +5269,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } ekey = ascScan ? nextWin.ekey : nextWin.skey; From a269f63a10ee6fe1c367fdfa7782db46de857092 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Wed, 24 Aug 2022 17:09:47 +0800 Subject: [PATCH 25/79] doc: change connector order and add taosadapter in REST API --- docs/en/14-reference/02-rest-api/02-rest-api.mdx | 2 +- docs/zh/08-connector/02-rest-api.mdx | 4 +++- docs/zh/08-connector/03-cpp.mdx | 1 - docs/zh/08-connector/04-java.mdx | 1 - docs/zh/08-connector/06-rust.mdx | 1 - docs/zh/08-connector/07-python.mdx | 1 - docs/zh/08-connector/08-node.mdx | 1 - docs/zh/08-connector/09-csharp.mdx | 1 - docs/zh/08-connector/10-php.mdx | 3 +-- 9 files changed, 5 insertions(+), 10 deletions(-) diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx index 8d4186a36b..74ba78b7fc 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -10,7 +10,7 @@ One difference from the native connector is that the REST interface is stateless ## Installation -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. +The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. The REST interface is provided by [taosAdapter](../taosadapter), to use REST interface you need to make sure `taosAdapter` is running properly. ## Verification diff --git a/docs/zh/08-connector/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx index 4b9171c07d..e254244657 100644 --- a/docs/zh/08-connector/02-rest-api.mdx +++ b/docs/zh/08-connector/02-rest-api.mdx @@ -1,5 +1,7 @@ --- title: REST API +sidebar_label: REST API +description: 详细介绍 TDengine 提供的 RESTful API. 
--- 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 @@ -10,7 +12,7 @@ title: REST API ## 安装 -RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。 +RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。TDengine 的 RESTful API 由 [taosAdapter](../../reference/taosadapter) 提供,在使用 RESTful API 之前需要确保 `taosAdapter` 正常运行。 ## 验证 diff --git a/docs/zh/08-connector/03-cpp.mdx b/docs/zh/08-connector/03-cpp.mdx index d27eeb7dfb..c0bd33f129 100644 --- a/docs/zh/08-connector/03-cpp.mdx +++ b/docs/zh/08-connector/03-cpp.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 1 sidebar_label: C/C++ title: C/C++ Connector --- diff --git a/docs/zh/08-connector/04-java.mdx b/docs/zh/08-connector/04-java.mdx index 20d2e4fabd..6b1715f8c6 100644 --- a/docs/zh/08-connector/04-java.mdx +++ b/docs/zh/08-connector/04-java.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 2 sidebar_label: Java title: TDengine Java Connector description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。 diff --git a/docs/zh/08-connector/06-rust.mdx b/docs/zh/08-connector/06-rust.mdx index 187e2f0b33..26f53c82d6 100644 --- a/docs/zh/08-connector/06-rust.mdx +++ b/docs/zh/08-connector/06-rust.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 5 sidebar_label: Rust title: TDengine Rust Connector --- diff --git a/docs/zh/08-connector/07-python.mdx b/docs/zh/08-connector/07-python.mdx index 88a5d4f84d..0242486d3b 100644 --- a/docs/zh/08-connector/07-python.mdx +++ b/docs/zh/08-connector/07-python.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 3 sidebar_label: Python title: TDengine Python Connector description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:tasos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas" diff --git a/docs/zh/08-connector/08-node.mdx b/docs/zh/08-connector/08-node.mdx index 63d690e554..167ae069d6 100644 --- a/docs/zh/08-connector/08-node.mdx +++ b/docs/zh/08-connector/08-node.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 6 sidebar_label: Node.js title: TDengine Node.js Connector --- diff --git a/docs/zh/08-connector/09-csharp.mdx b/docs/zh/08-connector/09-csharp.mdx index 8214717583..be27bfb685 100644 --- a/docs/zh/08-connector/09-csharp.mdx +++ b/docs/zh/08-connector/09-csharp.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 7 sidebar_label: C# title: C# Connector --- diff --git a/docs/zh/08-connector/10-php.mdx b/docs/zh/08-connector/10-php.mdx index 53611c0274..5e32c709de 100644 --- a/docs/zh/08-connector/10-php.mdx +++ b/docs/zh/08-connector/10-php.mdx @@ -1,6 +1,5 @@ --- -sidebar_position: 1 -sidebar_label: PHP(社区贡献) +sidebar_label: PHP title: PHP Connector --- From 61d7eee261601f8359fefaa056a160f2e039b906 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 24 Aug 2022 17:15:24 +0800 Subject: [PATCH 26/79] feat: update taostools 833b721 for3.0 (#16378) * feat: update taos-tools for 3.0 [TD-14141] * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools for 
3.0 * feat: update taos-tools for 3.0 * feat: update taos-tools 8e3b3ee * fix: remove submodules * feat: update taos-tools c529299 * feat: update taos-tools 9dc2fec for 3.0 * fix: optim upx * feat: update taos-tools f4e456a for 3.0 * feat: update taos-tools 2a2def1 for 3.0 * feat: update taos-tools c9cc20f for 3.0 * feat: update taostoosl 8a5e336 for 3.0 * feat: update taostools 3c7dafe for 3.0 * feat: update taos-tools 2d68404 for 3.0 * feat: update taos-tools 57bdfbf for 3.0 * fix: jenkinsfile2 to upgrade pip * feat: update taostoosl 11d23e5 for 3.0 * feat: update taostools 43924b8 for 3.0 * feat: update taostools 53a0103 for 3.0 * feat: update taostoosl d237772 for 3.0 * feat: update taos-tools 6bde102 for 3.0 * feat: upate taos-tools 2af2222 for 3.0 * feat: update taos-tools 833b721 for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index e593e6d62b..2d9b00eee7 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 2af2222 + GIT_TAG 833b721 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 168b6cc9ebef4429931c1925f624720b878f809f Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Wed, 24 Aug 2022 17:21:57 +0800 Subject: [PATCH 27/79] doc: change connector order --- docs/en/14-reference/03-connector/{cpp.mdx => 03-cpp.mdx} | 1 - docs/en/14-reference/03-connector/{java.mdx => 04-java.mdx} | 1 - docs/en/14-reference/03-connector/{go.mdx => 05-go.mdx} | 1 - docs/en/14-reference/03-connector/{rust.mdx => 06-rust.mdx} | 1 - .../en/14-reference/03-connector/{python.mdx => 07-python.mdx} | 1 - docs/en/14-reference/03-connector/{node.mdx => 08-node.mdx} | 1 - .../en/14-reference/03-connector/{csharp.mdx => 09-csharp.mdx} | 1 - docs/en/14-reference/03-connector/{php.mdx => 10-php.mdx} | 3 +-- .../14-reference/03-connector/{03-connector.mdx => index.mdx} | 0 9 files changed, 1 insertion(+), 9 deletions(-) rename docs/en/14-reference/03-connector/{cpp.mdx => 03-cpp.mdx} (99%) rename docs/en/14-reference/03-connector/{java.mdx => 04-java.mdx} (99%) rename docs/en/14-reference/03-connector/{go.mdx => 05-go.mdx} (99%) rename docs/en/14-reference/03-connector/{rust.mdx => 06-rust.mdx} (99%) rename docs/en/14-reference/03-connector/{python.mdx => 07-python.mdx} (99%) rename docs/en/14-reference/03-connector/{node.mdx => 08-node.mdx} (99%) rename docs/en/14-reference/03-connector/{csharp.mdx => 09-csharp.mdx} (99%) rename docs/en/14-reference/03-connector/{php.mdx => 10-php.mdx} (98%) rename docs/en/14-reference/03-connector/{03-connector.mdx => index.mdx} (100%) diff --git a/docs/en/14-reference/03-connector/cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx similarity index 99% rename from docs/en/14-reference/03-connector/cpp.mdx rename to docs/en/14-reference/03-connector/03-cpp.mdx index 5839ed4af8..02d7df48db 100644 --- a/docs/en/14-reference/03-connector/cpp.mdx +++ b/docs/en/14-reference/03-connector/03-cpp.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 1 sidebar_label: C/C++ title: C/C++ Connector --- diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/04-java.mdx similarity index 99% rename from docs/en/14-reference/03-connector/java.mdx rename to docs/en/14-reference/03-connector/04-java.mdx index 39514c37eb..0f977393f1 100644 --- 
a/docs/en/14-reference/03-connector/java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 2 sidebar_label: Java title: TDengine Java Connector description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors. diff --git a/docs/en/14-reference/03-connector/go.mdx b/docs/en/14-reference/03-connector/05-go.mdx similarity index 99% rename from docs/en/14-reference/03-connector/go.mdx rename to docs/en/14-reference/03-connector/05-go.mdx index 2926355040..00e3bc1bc3 100644 --- a/docs/en/14-reference/03-connector/go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 4 sidebar_label: Go title: TDengine Go Connector --- diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx similarity index 99% rename from docs/en/14-reference/03-connector/rust.mdx rename to docs/en/14-reference/03-connector/06-rust.mdx index e9b16ba94d..1184c98a28 100644 --- a/docs/en/14-reference/03-connector/rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 5 sidebar_label: Rust title: TDengine Rust Connector --- diff --git a/docs/en/14-reference/03-connector/python.mdx b/docs/en/14-reference/03-connector/07-python.mdx similarity index 99% rename from docs/en/14-reference/03-connector/python.mdx rename to docs/en/14-reference/03-connector/07-python.mdx index e183bbee22..fc95033baa 100644 --- a/docs/en/14-reference/03-connector/python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 3 sidebar_label: Python title: TDengine Python Connector description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas." 
diff --git a/docs/en/14-reference/03-connector/node.mdx b/docs/en/14-reference/03-connector/08-node.mdx similarity index 99% rename from docs/en/14-reference/03-connector/node.mdx rename to docs/en/14-reference/03-connector/08-node.mdx index d170044435..f93632b417 100644 --- a/docs/en/14-reference/03-connector/node.mdx +++ b/docs/en/14-reference/03-connector/08-node.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 6 sidebar_label: Node.js title: TDengine Node.js Connector --- diff --git a/docs/en/14-reference/03-connector/csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx similarity index 99% rename from docs/en/14-reference/03-connector/csharp.mdx rename to docs/en/14-reference/03-connector/09-csharp.mdx index 388ae49d09..c745b8dd1a 100644 --- a/docs/en/14-reference/03-connector/csharp.mdx +++ b/docs/en/14-reference/03-connector/09-csharp.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 7 sidebar_label: C# title: C# Connector --- diff --git a/docs/en/14-reference/03-connector/php.mdx b/docs/en/14-reference/03-connector/10-php.mdx similarity index 98% rename from docs/en/14-reference/03-connector/php.mdx rename to docs/en/14-reference/03-connector/10-php.mdx index 08cf34495f..820f703759 100644 --- a/docs/en/14-reference/03-connector/php.mdx +++ b/docs/en/14-reference/03-connector/10-php.mdx @@ -1,6 +1,5 @@ --- -sidebar_position: 1 -sidebar_label: PHP (community contribution) +sidebar_label: PHP title: PHP Connector --- diff --git a/docs/en/14-reference/03-connector/03-connector.mdx b/docs/en/14-reference/03-connector/index.mdx similarity index 100% rename from docs/en/14-reference/03-connector/03-connector.mdx rename to docs/en/14-reference/03-connector/index.mdx From 99490c063ee006c394527ade3ddaa4fa8c720d20 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 24 Aug 2022 17:36:10 +0800 Subject: [PATCH 28/79] feat: system table visible permission --- include/common/systable.h | 46 +- include/common/tmsg.h | 11 +- include/libs/command/command.h | 4 +- include/libs/nodes/plannodes.h | 1 + include/libs/parser/parser.h | 1 + include/libs/planner/planner.h | 1 + source/client/inc/clientInt.h | 15 +- source/client/src/clientImpl.c | 16 +- source/client/src/clientMain.c | 1 + source/client/src/clientMsgHandler.c | 1 + source/common/src/systable.c | 491 ++++++++++---------- source/common/src/tmsg.c | 2 + source/dnode/mnode/impl/inc/mndInfoSchema.h | 3 +- source/dnode/mnode/impl/src/mndDb.c | 2 +- source/dnode/mnode/impl/src/mndInfoSchema.c | 16 +- source/dnode/mnode/impl/src/mndProfile.c | 1 + source/dnode/mnode/impl/src/mndStb.c | 7 +- source/libs/command/src/command.c | 42 +- source/libs/executor/inc/executorimpl.h | 1 + source/libs/executor/src/scanoperator.c | 14 +- source/libs/nodes/src/nodesCloneFuncs.c | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 8 +- source/libs/parser/src/parTranslater.c | 19 +- source/libs/planner/src/planPhysiCreater.c | 1 + 24 files changed, 386 insertions(+), 319 deletions(-) diff --git a/include/common/systable.h b/include/common/systable.h index ed2e6a46c3..01c9807627 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -22,27 +22,27 @@ extern "C" { #ifndef TDENGINE_SYSTABLE_H #define TDENGINE_SYSTABLE_H -#define TSDB_INFORMATION_SCHEMA_DB "information_schema" -#define TSDB_INS_TABLE_DNODES "ins_dnodes" -#define TSDB_INS_TABLE_MNODES "ins_mnodes" -#define TSDB_INS_TABLE_MODULES "ins_modules" -#define TSDB_INS_TABLE_QNODES "ins_qnodes" -#define TSDB_INS_TABLE_BNODES "ins_bnodes" -#define 
TSDB_INS_TABLE_SNODES "ins_snodes" -#define TSDB_INS_TABLE_CLUSTER "ins_cluster" -#define TSDB_INS_TABLE_DATABASES "ins_databases" -#define TSDB_INS_TABLE_FUNCTIONS "ins_functions" -#define TSDB_INS_TABLE_INDEXES "ins_indexes" -#define TSDB_INS_TABLE_STABLES "ins_stables" -#define TSDB_INS_TABLE_TABLES "ins_tables" -#define TSDB_INS_TABLE_TAGS "ins_tags" -#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed" -#define TSDB_INS_TABLE_USERS "ins_users" -#define TSDB_INS_TABLE_LICENCES "ins_grants" -#define TSDB_INS_TABLE_VGROUPS "ins_vgroups" -#define TSDB_INS_TABLE_VNODES "ins_vnodes" -#define TSDB_INS_TABLE_CONFIGS "ins_configs" -#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables" +#define TSDB_INFORMATION_SCHEMA_DB "information_schema" +#define TSDB_INS_TABLE_DNODES "ins_dnodes" +#define TSDB_INS_TABLE_MNODES "ins_mnodes" +#define TSDB_INS_TABLE_MODULES "ins_modules" +#define TSDB_INS_TABLE_QNODES "ins_qnodes" +#define TSDB_INS_TABLE_BNODES "ins_bnodes" +#define TSDB_INS_TABLE_SNODES "ins_snodes" +#define TSDB_INS_TABLE_CLUSTER "ins_cluster" +#define TSDB_INS_TABLE_DATABASES "ins_databases" +#define TSDB_INS_TABLE_FUNCTIONS "ins_functions" +#define TSDB_INS_TABLE_INDEXES "ins_indexes" +#define TSDB_INS_TABLE_STABLES "ins_stables" +#define TSDB_INS_TABLE_TABLES "ins_tables" +#define TSDB_INS_TABLE_TAGS "ins_tags" +#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed" +#define TSDB_INS_TABLE_USERS "ins_users" +#define TSDB_INS_TABLE_LICENCES "ins_grants" +#define TSDB_INS_TABLE_VGROUPS "ins_vgroups" +#define TSDB_INS_TABLE_VNODES "ins_vnodes" +#define TSDB_INS_TABLE_CONFIGS "ins_configs" +#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables" #define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" #define TSDB_PERFS_TABLE_SMAS "perf_smas" @@ -60,16 +60,20 @@ typedef struct SSysDbTableSchema { const char* name; const int32_t type; const int32_t bytes; + const bool sysInfo; } SSysDbTableSchema; typedef struct SSysTableMeta { const char* name; const SSysDbTableSchema* schema; const int32_t colNum; + const bool sysInfo; } SSysTableMeta; void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size); void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size); +void getVisibleInfosTablesNum(bool sysInfo, size_t* size); +bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags); #ifdef __cplusplus } diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 8f199c72f7..bb72a6cfc1 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -292,10 +292,11 @@ int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp); int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp); void tFreeSSubmitRsp(SSubmitRsp* pRsp); -#define COL_SMA_ON ((int8_t)0x1) -#define COL_IDX_ON ((int8_t)0x2) -#define COL_SET_NULL ((int8_t)0x10) -#define COL_SET_VAL ((int8_t)0x20) +#define COL_SMA_ON ((int8_t)0x1) +#define COL_IDX_ON ((int8_t)0x2) +#define COL_SET_NULL ((int8_t)0x10) +#define COL_SET_VAL ((int8_t)0x20) +#define COL_IS_SYSINFO ((int8_t)0x40) struct SSchema { int8_t type; int8_t flags; @@ -530,6 +531,7 @@ typedef struct { uint32_t connId; int32_t dnodeNum; int8_t superUser; + int8_t sysInfo; int8_t connType; SEpSet epSet; int32_t svrTimestamp; @@ -1253,6 +1255,7 @@ typedef struct { uint64_t suid; uint64_t tuid; int32_t vgId; + int8_t sysInfo; SSchema* pSchemas; } STableMetaRsp; diff --git a/include/libs/command/command.h b/include/libs/command/command.h index 8a4ecad37d..b3339a417b 100644 --- 
a/include/libs/command/command.h +++ b/include/libs/command/command.h @@ -17,12 +17,12 @@ #define TDENGINE_COMMAND_H #include "cmdnodes.h" -#include "tmsg.h" #include "plannodes.h" +#include "tmsg.h" typedef struct SExplainCtx SExplainCtx; -int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp); +int32_t qExecCommand(bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp); int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp); int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs); diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 8661baceb2..6fd6a316eb 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -317,6 +317,7 @@ typedef struct SSystemTableScanPhysiNode { SEpSet mgmtEpSet; bool showRewrite; int32_t accountId; + bool sysInfo; } SSystemTableScanPhysiNode; typedef struct STableScanPhysiNode { diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 717278d51d..95bde85864 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -49,6 +49,7 @@ typedef struct SParseContext { SStmtCallback* pStmtCb; const char* pUser; bool isSuperUser; + bool enableSysInfo; bool async; int8_t schemalessType; const char* svrVer; diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h index d1a5c5db10..05caa7a7bb 100644 --- a/include/libs/planner/planner.h +++ b/include/libs/planner/planner.h @@ -38,6 +38,7 @@ typedef struct SPlanContext { char* pMsg; int32_t msgLen; const char* pUser; + bool sysInfo; } SPlanContext; // Create the physical plan for the query, according to the AST. diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 855dfb15ee..b9c1be5987 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -95,15 +95,15 @@ typedef struct { } SClientHbMgr; typedef struct SQueryExecMetric { - int64_t start; // start timestamp, us + int64_t start; // start timestamp, us int64_t syntaxStart; // start to parse, us - int64_t syntaxEnd; // end to parse, us - int64_t ctgStart; // start to parse, us - int64_t ctgEnd; // end to parse, us + int64_t syntaxEnd; // end to parse, us + int64_t ctgStart; // start to parse, us + int64_t ctgEnd; // end to parse, us int64_t semanticEnd; int64_t execEnd; - int64_t send; // start to send to server, us - int64_t rsp; // receive response from server, us + int64_t send; // start to send to server, us + int64_t rsp; // receive response from server, us } SQueryExecMetric; struct SAppInstInfo { @@ -137,6 +137,7 @@ typedef struct STscObj { char db[TSDB_DB_FNAME_LEN]; char sVer[TSDB_VERSION_LEN]; char sDetailVer[128]; + int8_t sysInfo; int8_t connType; int32_t acctId; uint32_t connId; @@ -257,7 +258,7 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly); void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly); -int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols); +int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols); static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) { SMqRspObj* msg = (SMqRspObj*)res; diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 998b9cee5c..35c09f8357 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -215,6 +215,7 @@ int32_t parseSql(SRequestObj* pRequest, bool 
topicQuery, SQuery** pQuery, SStmtC .pUser = pTscObj->user, .schemalessType = pTscObj->schemalessType, .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), + .enableSysInfo = pTscObj->sysInfo, .svrVer = pTscObj->sVer, .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)}; @@ -246,7 +247,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { SRetrieveTableRsp* pRsp = NULL; - int32_t code = qExecCommand(pQuery->pRoot, &pRsp); + int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -284,7 +285,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { return; } - int32_t code = qExecCommand(pQuery->pRoot, &pRsp); + int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -419,7 +420,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, - .pUser = pRequest->pTscObj->user}; + .pUser = pRequest->pTscObj->user, + .sysInfo = pRequest->pTscObj->sysInfo}; return qCreateQueryPlan(&cxt, pPlan, pNodeList); } @@ -992,7 +994,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, - .pUser = pRequest->pTscObj->user}; + .pUser = pRequest->pTscObj->user, + .sysInfo = pRequest->pTscObj->sysInfo}; SAppInstInfo* pAppInfo = getAppInfo(pRequest); SQueryPlan* pDag = NULL; @@ -1577,10 +1580,11 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int } int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) { - int32_t cols = *(int32_t*) (p + sizeof(int32_t) * 3); + int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); ASSERT(numOfCols == cols); - return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t)); + return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) + + numOfCols * (sizeof(int8_t) + sizeof(int32_t)); } static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 31ae443d5b..9ceb6e0683 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -759,6 +759,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { .pUser = pTscObj->user, .schemalessType = pTscObj->schemalessType, .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), + .enableSysInfo = pTscObj->sysInfo, .async = true, .svrVer = pTscObj->sVer, .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)}; diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 0c4cf23c4e..023ac96844 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -96,6 +96,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id); } + pTscObj->sysInfo = connectRsp.sysInfo; 
pTscObj->connId = connectRsp.connId; pTscObj->acctId = connectRsp.acctId; tstrncpy(pTscObj->sVer, connectRsp.sVer, tListLen(pTscObj->sVer)); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 68a77a9f33..0465f1f3f4 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -15,343 +15,345 @@ #include "systable.h" #include "taos.h" +#include "taosdef.h" #include "tdef.h" #include "tgrant.h" +#include "tmsg.h" #include "types.h" #define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE) +// clang-format off static const SSysDbTableSchema dnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, - {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, - {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, + {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, + {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, + {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema mnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema modulesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const 
SSysDbTableSchema qnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema snodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema bnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema clusterSchema[] = { - {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema userDBSchema[] = { - {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = 
TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL}, - {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, + {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, + {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true}, + {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, + {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, }; static const SSysDbTableSchema userFuncSchema[] = { - {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = 
TSDB_DATA_TYPE_VARCHAR}, - {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, }; static const SSysDbTableSchema userIdxSchema[] = { - {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysDbTableSchema userStbsSchema[] = { - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = 
false}, + {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema streamSchema[] = { - {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userTblsSchema[] = { - {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "stable_name", .bytes 
= SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userTagsSchema[] = { - {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userTblDistSchema[] = { - {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE}, - {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "stddev_of_rows", .bytes = 4, 
.type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE, .sysInfo = true}, + {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, }; static const SSysDbTableSchema userUsersSchema[] = { - {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false}, + {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false}, + {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; GRANTS_SCHEMA; static const SSysDbTableSchema vgroupsSchema[] = { - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, 
.sysInfo = true}, + {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, }; static const SSysDbTableSchema smaSchema[] = { - {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, }; static const SSysDbTableSchema transSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "last_action_info", - .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, - .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "last_action_info", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; static const SSysDbTableSchema configSchema[] = { - {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema variablesSchema[] = { {.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysTableMeta infosMeta[] = { - 
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)}, - {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)}, - {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)}, - {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)}, + {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, + {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, + {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, + {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true}, // {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)}, // {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)}, - {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)}, - {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema)}, - {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)}, - {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema)}, - {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema)}, - {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema)}, - {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema)}, + {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, + {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema), false}, + {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false}, + {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema), false}, + {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema), false}, + {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema), false}, + {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema), false}, // {TSDB_INS_TABLE_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)}, - {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema)}, - {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)}, - {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)}, - {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)}, - {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema)}, + {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema), false}, + {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema), true}, + {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true}, + {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true}, + {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true}, }; static const SSysDbTableSchema connectionsSchema[] = { - {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false}, + {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false}, + {.name = "end_point", .bytes 
= TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysDbTableSchema topicSchema[] = { - {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, + {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, // TODO config }; static const SSysDbTableSchema consumerSchema[] = { - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/ - {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},*/ + {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysDbTableSchema subscriptionSchema[] = { - {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = 
TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; static const SSysDbTableSchema offsetSchema[] = { - {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; static const SSysDbTableSchema querySchema[] = { - {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL}, - {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false}, + {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false}, + {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "sub_status", 
.bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema appSchema[] = { - {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysTableMeta perfsMeta[] = { - {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)}, - {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)}, - {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)}, - {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)}, - {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)}, + {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false}, + {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false}, + {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false}, + {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false}, + {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, 
tListLen(subscriptionSchema), false}, // {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)}, - {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)}, - {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)}, - {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)}, - {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}}; + {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false}, + {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false}, + {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false}, + {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}}; +// clang-format on void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) { if (pInfosTableMeta) { @@ -370,3 +372,26 @@ void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) { *size = tListLen(perfsMeta); } } + +void getVisibleInfosTablesNum(bool sysInfo, size_t* size) { + if (sysInfo) { + getInfosDbMeta(NULL, size); + return; + } + *size = 0; + const SSysTableMeta* pMeta = NULL; + size_t totalNum = 0; + getInfosDbMeta(&pMeta, &totalNum); + for (size_t i = 0; i < totalNum; ++i) { + if (!pMeta[i].sysInfo) { + ++(*size); + } + } +} + +bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags) { + if (sysInfo || TSDB_SYSTEM_TABLE != tableType) { + return false; + } + return 0 != (flags & COL_IS_SYSINFO); +} diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 533d924546..59c9a69bb6 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3630,6 +3630,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (tEncodeU32(&encoder, pRsp->connId) < 0) return -1; if (tEncodeI32(&encoder, pRsp->dnodeNum) < 0) return -1; if (tEncodeI8(&encoder, pRsp->superUser) < 0) return -1; + if (tEncodeI8(&encoder, pRsp->sysInfo) < 0) return -1; if (tEncodeI8(&encoder, pRsp->connType) < 0) return -1; if (tEncodeSEpSet(&encoder, &pRsp->epSet) < 0) return -1; if (tEncodeI32(&encoder, pRsp->svrTimestamp) < 0) return -1; @@ -3652,6 +3653,7 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (tDecodeU32(&decoder, &pRsp->connId) < 0) return -1; if (tDecodeI32(&decoder, &pRsp->dnodeNum) < 0) return -1; if (tDecodeI8(&decoder, &pRsp->superUser) < 0) return -1; + if (tDecodeI8(&decoder, &pRsp->sysInfo) < 0) return -1; if (tDecodeI8(&decoder, &pRsp->connType) < 0) return -1; if (tDecodeSEpSet(&decoder, &pRsp->epSet) < 0) return -1; if (tDecodeI32(&decoder, &pRsp->svrTimestamp) < 0) return -1; diff --git a/source/dnode/mnode/impl/inc/mndInfoSchema.h b/source/dnode/mnode/impl/inc/mndInfoSchema.h index b10d92ee3d..4f98465cd1 100644 --- a/source/dnode/mnode/impl/inc/mndInfoSchema.h +++ b/source/dnode/mnode/impl/inc/mndInfoSchema.h @@ -24,7 +24,8 @@ extern "C" { int32_t mndInitInfos(SMnode *pMnode); void mndCleanupInfos(SMnode *pMnode); -int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp); +int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo, + STableMetaRsp *pRsp); int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbName, STableCfgRsp *pRsp); #ifdef __cplusplus diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 853ace79fd..8c1c3ba873 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1731,7 +1731,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, 
SShowObj *pShow, SSDataBlock *pBloc SDbObj infoschemaDb = {0}; setInformationSchemaDbCfg(&infoschemaDb); size_t numOfTables = 0; - getInfosDbMeta(NULL, &numOfTables); + getVisibleInfosTablesNum(sysinfo, &numOfTables); mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1); numOfRows += 1; diff --git a/source/dnode/mnode/impl/src/mndInfoSchema.c b/source/dnode/mnode/impl/src/mndInfoSchema.c index bf33cf603f..09172115f8 100644 --- a/source/dnode/mnode/impl/src/mndInfoSchema.c +++ b/source/dnode/mnode/impl/src/mndInfoSchema.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "systable.h" #include "mndInt.h" +#include "systable.h" static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t colNum, SSchema **pDst) { SSchema *schema = taosMemoryCalloc(colNum, sizeof(SSchema)); @@ -29,6 +29,9 @@ static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t co schema[i].type = pSrc[i].type; schema[i].colId = i + 1; schema[i].bytes = pSrc[i].bytes; + if (pSrc[i].sysInfo) { + schema[i].flags |= COL_IS_SYSINFO; + } } *pDst = schema; @@ -43,13 +46,14 @@ static int32_t mndInsInitMeta(SHashObj *hash) { meta.sversion = 1; meta.tversion = 1; - size_t size = 0; - const SSysTableMeta* pInfosTableMeta = NULL; + size_t size = 0; + const SSysTableMeta *pInfosTableMeta = NULL; getInfosDbMeta(&pInfosTableMeta, &size); for (int32_t i = 0; i < size; ++i) { tstrncpy(meta.tbName, pInfosTableMeta[i].name, sizeof(meta.tbName)); meta.numOfColumns = pInfosTableMeta[i].colNum; + meta.sysInfo = pInfosTableMeta[i].sysInfo; if (mndInitInfosTableSchema(pInfosTableMeta[i].schema, pInfosTableMeta[i].colNum, &meta.pSchemas)) { return -1; @@ -64,14 +68,15 @@ static int32_t mndInsInitMeta(SHashObj *hash) { return 0; } -int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) { +int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo, + STableMetaRsp *pRsp) { if (NULL == pMnode->infosMeta) { terrno = TSDB_CODE_APP_NOT_READY; return -1; } STableMetaRsp *pMeta = taosHashGet(pMnode->infosMeta, tbName, strlen(tbName)); - if (NULL == pMeta) { + if (NULL == pMeta || (!sysinfo && pMeta->sysInfo)) { mError("invalid information schema table name:%s", tbName); terrno = TSDB_CODE_MND_INVALID_SYS_TABLENAME; return -1; @@ -121,7 +126,6 @@ int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbN return 0; } - int32_t mndInitInfos(SMnode *pMnode) { pMnode->infosMeta = taosHashInit(20, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK); if (pMnode->infosMeta == NULL) { diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index e55c562e38..e8737e30c9 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -270,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { SConnectRsp connectRsp = {0}; connectRsp.acctId = pUser->acctId; connectRsp.superUser = pUser->superUser; + connectRsp.sysInfo = pUser->sysInfo; connectRsp.clusterId = pMnode->clusterId; connectRsp.connId = pConn->id; connectRsp.connType = connReq.connType; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index ebec3d5ea6..6359aef3f5 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -2157,6 +2157,10 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { STableInfoReq infoReq = {0}; 
STableMetaRsp metaRsp = {0}; + SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user); + if (pUser == NULL) return 0; + bool sysinfo = pUser->sysInfo; + if (tDeserializeSTableInfoReq(pReq->pCont, pReq->contLen, &infoReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; @@ -2164,7 +2168,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { if (0 == strcmp(infoReq.dbFName, TSDB_INFORMATION_SCHEMA_DB)) { mDebug("information_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName); - if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, &metaRsp) != 0) { + if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, sysinfo, &metaRsp) != 0) { goto _OVER; } } else if (0 == strcmp(infoReq.dbFName, TSDB_PERFORMANCE_SCHEMA_DB)) { @@ -2203,6 +2207,7 @@ _OVER: mError("stb:%s.%s, failed to retrieve meta since %s", infoReq.dbFName, infoReq.tbName, terrstr()); } + mndReleaseUser(pMnode, pUser); tFreeSTableMetaRsp(&metaRsp); return code; } diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 1b2489acd6..7d259fe06c 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -17,6 +17,7 @@ #include "catalog.h" #include "commandInt.h" #include "scheduler.h" +#include "systable.h" #include "tdatablock.h" #include "tglobal.h" #include "tgrant.h" @@ -75,46 +76,41 @@ static SSDataBlock* buildDescResultDataBlock() { return pBlock; } -static void setDescResultIntoDataBlock(SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) { +static void setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) { blockDataEnsureCapacity(pBlock, numOfRows); - pBlock->info.rows = numOfRows; + pBlock->info.rows = 0; // field SColumnInfoData* pCol1 = taosArrayGet(pBlock->pDataBlock, 0); - char buf[DESCRIBE_RESULT_FIELD_LEN] = {0}; - for (int32_t i = 0; i < numOfRows; ++i) { - STR_TO_VARSTR(buf, pMeta->schema[i].name); - colDataAppend(pCol1, i, buf, false); - } - // Type SColumnInfoData* pCol2 = taosArrayGet(pBlock->pDataBlock, 1); - for (int32_t i = 0; i < numOfRows; ++i) { - STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name); - colDataAppend(pCol2, i, buf, false); - } - // Length SColumnInfoData* pCol3 = taosArrayGet(pBlock->pDataBlock, 2); - for (int32_t i = 0; i < numOfRows; ++i) { - int32_t bytes = getSchemaBytes(pMeta->schema + i); - colDataAppend(pCol3, i, (const char*)&bytes, false); - } - // Note SColumnInfoData* pCol4 = taosArrayGet(pBlock->pDataBlock, 3); + char buf[DESCRIBE_RESULT_FIELD_LEN] = {0}; for (int32_t i = 0; i < numOfRows; ++i) { + if (invisibleColumn(sysInfoUser, pMeta->tableType, pMeta->schema[i].flags)) { + continue; + } + STR_TO_VARSTR(buf, pMeta->schema[i].name); + colDataAppend(pCol1, pBlock->info.rows, buf, false); + STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name); + colDataAppend(pCol2, pBlock->info.rows, buf, false); + int32_t bytes = getSchemaBytes(pMeta->schema + i); + colDataAppend(pCol3, pBlock->info.rows, (const char*)&bytes, false); STR_TO_VARSTR(buf, i >= pMeta->tableInfo.numOfColumns ? 
"TAG" : ""); - colDataAppend(pCol4, i, buf, false); + colDataAppend(pCol4, pBlock->info.rows, buf, false); + ++(pBlock->info.rows); } } -static int32_t execDescribe(SNode* pStmt, SRetrieveTableRsp** pRsp) { +static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) { SDescribeStmt* pDesc = (SDescribeStmt*)pStmt; int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta); SSDataBlock* pBlock = buildDescResultDataBlock(); - setDescResultIntoDataBlock(pBlock, numOfRows, pDesc->pMeta); + setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta); return buildRetrieveTableRsp(pBlock, DESCRIBE_RESULT_COLS, pRsp); } @@ -665,10 +661,10 @@ static int32_t execSelectWithoutFrom(SSelectStmt* pSelect, SRetrieveTableRsp** p return code; } -int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) { +int32_t qExecCommand(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) { switch (nodeType(pStmt)) { case QUERY_NODE_DESCRIBE_STMT: - return execDescribe(pStmt, pRsp); + return execDescribe(sysInfoUser, pStmt, pRsp); case QUERY_NODE_RESET_QUERY_CACHE_STMT: return execResetQueryCache(); case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 67f7cb2f6f..a64a926710 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -497,6 +497,7 @@ typedef struct SSysTableScanInfo { SReadHandle readHandle; int32_t accountId; const char* pUser; + bool sysInfo; bool showRewrite; SNode* pCondition; // db_name filter condition, to discard data that are not in current database SMTbCursor* pCur; // cursor for iterate the local table meta store. diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 599f86f4fa..f73f72dca7 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -36,8 +36,8 @@ #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? 
TSDB_ORDER_DESC : TSDB_ORDER_ASC)) static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity); -static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, - const char* dbName); +static int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, + size_t size, const char* dbName); static bool processBlockWithProbability(const SSampleExecInfo* pInfo); @@ -2392,10 +2392,10 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { const SSysTableMeta* pSysDbTableMeta = NULL; getInfosDbMeta(&pSysDbTableMeta, &size); - p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); + p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); getPerfDbMeta(&pSysDbTableMeta, &size); - p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); + p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); pInfo->pRes->info.rows = p->info.rows; relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); @@ -2404,13 +2404,16 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { return pInfo->pRes->info.rows; } -int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, +int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName) { char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; int32_t numOfRows = p->info.rows; for (int32_t i = 0; i < size; ++i) { const SSysTableMeta* pm = &pSysDbTableMeta[i]; + if (!sysInfo && pm->sysInfo) { + continue; + } SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); @@ -2464,6 +2467,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan pInfo->accountId = pScanPhyNode->accountId; pInfo->pUser = taosMemoryStrDup((void*)pUser); + pInfo->sysInfo = pScanPhyNode->sysInfo; pInfo->showRewrite = pScanPhyNode->showRewrite; pInfo->pRes = pResBlock; pInfo->pCondition = pScanNode->node.pConditions; diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 9390d129df..83bccbffb4 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -545,6 +545,7 @@ static int32_t physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSys COPY_OBJECT_FIELD(mgmtEpSet, sizeof(SEpSet)); COPY_SCALAR_FIELD(showRewrite); COPY_SCALAR_FIELD(accountId); + COPY_SCALAR_FIELD(sysInfo); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 0f32001c47..822bdec365 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1654,6 +1654,7 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet"; static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite"; static const char* jkSysTableScanPhysiPlanAccountId = "AccountId"; +static const char* jkSysTableScanPhysiPlanSysInfo = "SysInfo"; static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) { const SSystemTableScanPhysiNode* pNode = (const SSystemTableScanPhysiNode*)pObj; @@ -1668,6 +1669,9 @@ static int32_t physiSysTableScanNodeToJson(const 
void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkSysTableScanPhysiPlanSysInfo, pNode->sysInfo); + } return code; } @@ -1684,7 +1688,9 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) { } if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code); - ; + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkSysTableScanPhysiPlanSysInfo, &pNode->sysInfo); } return code; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8a1d8763bf..5a32c87fc3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -784,6 +784,9 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p int32_t nums = pMeta->tableInfo.numOfColumns + (igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0)); for (int32_t i = 0; i < nums; ++i) { + if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { + continue; + } SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); if (NULL == pCol) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); @@ -826,7 +829,8 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, } int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns; for (int32_t i = 0; i < nums; ++i) { - if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) { + if (0 == strcmp(pCol->colName, pMeta->schema[i].name) && + !invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i - pMeta->tableInfo.numOfColumns), pCol); *pFound = true; break; @@ -2192,14 +2196,14 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { code = setTableCacheLastMode(pCxt, &name, pRealTable); } } - pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision; - pRealTable->table.singleTable = isSingleTable(pRealTable); if (TSDB_CODE_SUCCESS == code) { + pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision; + pRealTable->table.singleTable = isSingleTable(pRealTable); + if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) { + pCxt->stableQuery = true; + } code = addNamespace(pCxt, pRealTable); } - if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) { - pCxt->stableQuery = true; - } break; } case QUERY_NODE_TEMP_TABLE: { @@ -2594,8 +2598,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi return code; } -static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval, - bool isInterpFill) { +static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval, bool isInterpFill) { if (FILL_MODE_NONE == pFill->mode) { if (isInterpFill) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Unsupported fill type"); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index c7eb6f7b5e..cafae18dbe 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -576,6 +576,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* 
pScan->showRewrite = pScanLogicNode->showRewrite; pScan->accountId = pCxt->pPlanCxt->acctId; + pScan->sysInfo = pCxt->pPlanCxt->sysInfo; if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLES) || 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLE_DISTRIBUTED) || 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TAGS)) { From a6fa87fb6a443b1edfa486c78edb6c41c768f914 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 24 Aug 2022 17:36:22 +0800 Subject: [PATCH 29/79] enh: add specific threads for rsma task --- docs/en/05-get-started/01-docker.md | 2 +- docs/zh/02-intro.md | 4 +- docs/zh/05-get-started/03-package.md | 2 +- docs/zh/05-get-started/index.md | 2 +- docs/zh/07-develop/01-connect/index.md | 2 +- docs/zh/07-develop/index.md | 2 +- docs/zh/08-connector/{cpp.mdx => 03-cpp.mdx} | 4 +- .../zh/08-connector/{java.mdx => 04-java.mdx} | 6 +- docs/zh/08-connector/{go.mdx => 05-go.mdx} | 4 +- .../zh/08-connector/{rust.mdx => 06-rust.mdx} | 4 +- .../{python.mdx => 07-python.mdx} | 6 +- .../zh/08-connector/{node.mdx => 08-node.mdx} | 4 +- .../{csharp.mdx => 09-csharp.mdx} | 6 +- docs/zh/08-connector/{php.mdx => 10-php.mdx} | 2 +- .../{03-connector.mdx => index.md} | 2 + docs/zh/12-taos-sql/14-stream.md | 4 +- docs/zh/14-reference/04-taosadapter.md | 4 +- docs/zh/20-third-party/09-emq-broker.md | 2 +- docs/zh/28-releases/01-tdengine.md | 4 +- examples/c/stream_demo.c | 8 +- include/libs/executor/executor.h | 9 +- include/libs/stream/tstream.h | 44 ++++- source/client/inc/clientInt.h | 7 +- source/client/inc/clientLog.h | 1 + source/client/src/clientEnv.c | 15 +- source/client/src/clientImpl.c | 2 + source/client/src/clientMain.c | 10 + source/common/src/tglobal.c | 2 +- source/dnode/mnode/impl/src/mndTelem.c | 4 +- source/dnode/mnode/impl/src/mndTrans.c | 2 +- .../dnode/mnode/impl/test/sma/CMakeLists.txt | 10 +- .../dnode/mnode/impl/test/stb/CMakeLists.txt | 10 +- source/dnode/vnode/src/sma/smaRollup.c | 75 ++++--- source/dnode/vnode/src/tq/tq.c | 9 + source/dnode/vnode/src/tsdb/tsdbRead.c | 4 +- source/libs/executor/inc/executorimpl.h | 1 + source/libs/executor/src/executil.c | 3 + source/libs/executor/src/executorimpl.c | 14 +- source/libs/executor/src/timewindowoperator.c | 4 +- source/libs/scheduler/inc/schInt.h | 2 +- source/libs/scheduler/src/schTask.c | 12 +- source/libs/stream/src/streamExec.c | 1 - source/libs/stream/src/streamMeta.c | 21 +- source/libs/stream/src/streamRecover.c | 5 +- source/libs/stream/src/streamState.c | 187 ++++++++++++++++++ source/libs/stream/src/streamTask.c | 3 + source/libs/transport/src/transSvr.c | 2 +- source/libs/wal/src/walMeta.c | 3 +- tests/script/tsim/db/basic2.sim | 5 +- 49 files changed, 427 insertions(+), 114 deletions(-) rename docs/zh/08-connector/{cpp.mdx => 03-cpp.mdx} (99%) rename docs/zh/08-connector/{java.mdx => 04-java.mdx} (99%) rename docs/zh/08-connector/{go.mdx => 05-go.mdx} (98%) rename docs/zh/08-connector/{rust.mdx => 06-rust.mdx} (99%) rename docs/zh/08-connector/{python.mdx => 07-python.mdx} (96%) rename docs/zh/08-connector/{node.mdx => 08-node.mdx} (97%) rename docs/zh/08-connector/{csharp.mdx => 09-csharp.mdx} (97%) rename docs/zh/08-connector/{php.mdx => 10-php.mdx} (97%) rename docs/zh/08-connector/{03-connector.mdx => index.md} (98%) create mode 100644 source/libs/stream/src/streamState.c diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 32eee6b942..de5b620a77 100644 --- a/docs/en/05-get-started/01-docker.md +++ 
b/docs/en/05-get-started/01-docker.md @@ -42,7 +42,7 @@ To do so, run the following command: ``` - This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to ten and a `location` tag of either `California.SanFrancisco` or `California.SanDiego`. +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system. diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index a6ef2b94b6..f726b4ea92 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -22,8 +22,8 @@ TDengine的主要功能如下: 9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询 10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export) 11. 支持对[TDengine 集群本身的监控](../operation/monitor) -12. 提供 [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) 等多种编程语言的[连接器](../reference/connector/) -13. 支持 [REST 接口](../reference/rest-api/) +12. 提供各种语言的[连接器](../connector): 如 C/C++, Java, Go, Node.JS, Rust, Python, C# 等 +13. 支持 [REST 接口](../connector/rest-api/) 14. 支持与[ Grafana 无缝集成](../third-party/grafana) 15. 支持与 Google Data Studio 无缝集成 16. 支持 [Kubernetes 部署](../deployment/k8s) diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index 85005b9551..a1c1802d77 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -9,7 +9,7 @@ import PkgListV3 from "/components/PkgListV3"; 您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. 
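For the `taosBenchmark` dataset described above, a few queries in the TDengine CLI give a quick sanity check. This is only a minimal sketch and assumes the default run that creates the `test` database and the `meters` supertable with the `ts`, `current`, `voltage` and `phase` columns and a `groupId` tag from 1 to 10:

```sql
-- total number of generated rows across all subtables
SELECT COUNT(*) FROM test.meters;

-- basic aggregates over the whole supertable
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;

-- the same aggregates restricted to one tag value
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```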
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../reference/rest-api/)。 +TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。 为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index 794081b4e4..20f8235d87 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -3,7 +3,7 @@ title: 立即开始 description: '快速设置 TDengine 环境并体验其高效写入和查询' --- -TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。 +TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/taosadapter) 提供 [RESTful 接口](../connector/rest-api)。 本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。 diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index 77f5838c8f..3e44e6c5da 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 关键不同点在于: 1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。 -2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../connector/cpp#参数绑定-api)、[订阅](../../connector/cpp#订阅和消费-api)等等。 +2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../connector/cpp/#参数绑定-api)、[订阅](../../connector/cpp/#订阅和消费-api)等等。 ## 安装客户端驱动 taosc diff --git a/docs/zh/07-develop/index.md b/docs/zh/07-develop/index.md index f46f91df70..20c0170844 100644 --- a/docs/zh/07-develop/index.md +++ b/docs/zh/07-develop/index.md @@ -12,7 +12,7 @@ title: 开发指南 7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。 8. 
如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。 -本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../reference/connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](/third-party/)。 +本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](../third-party/)。 如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在GitHub上直接递交issue。 diff --git a/docs/zh/08-connector/cpp.mdx b/docs/zh/08-connector/03-cpp.mdx similarity index 99% rename from docs/zh/08-connector/cpp.mdx rename to docs/zh/08-connector/03-cpp.mdx index 6e7d6c25b9..d27eeb7dfb 100644 --- a/docs/zh/08-connector/cpp.mdx +++ b/docs/zh/08-connector/03-cpp.mdx @@ -22,7 +22,7 @@ TDengine 客户端驱动的动态库位于: ## 支持的平台 -请参考[支持的平台列表](../connector#支持的平台) +请参考[支持的平台列表](../#支持的平台) ## 支持的版本 @@ -30,7 +30,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一 ## 安装步骤 -TDengine 客户端驱动的安装请参考 [安装指南](../connector#安装步骤) +TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤) ## 建立连接 diff --git a/docs/zh/08-connector/java.mdx b/docs/zh/08-connector/04-java.mdx similarity index 99% rename from docs/zh/08-connector/java.mdx rename to docs/zh/08-connector/04-java.mdx index 723b2ad681..20d2e4fabd 100644 --- a/docs/zh/08-connector/java.mdx +++ b/docs/zh/08-connector/04-java.mdx @@ -35,7 +35,7 @@ REST 连接支持所有能运行 Java 的平台。 ## 版本支持 -请参考[版本支持列表](../connector#版本支持) +请参考[版本支持列表](../#版本支持) ## TDengine DataType 和 Java DataType @@ -64,7 +64,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 使用 Java Connector 连接数据库前,需要具备以下条件: - 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本 -- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../connector#安装客户端驱动) +- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动) ### 安装连接器 @@ -630,7 +630,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ### 无模式写入 -TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。 +TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。 **注意**: diff --git a/docs/zh/08-connector/go.mdx b/docs/zh/08-connector/05-go.mdx similarity index 98% rename from docs/zh/08-connector/go.mdx rename to docs/zh/08-connector/05-go.mdx index e883598c12..9d30f75190 100644 --- a/docs/zh/08-connector/go.mdx +++ b/docs/zh/08-connector/05-go.mdx @@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。 ## 版本支持 -请参考[版本支持列表](../connector#版本支持) +请参考[版本支持列表](../#版本支持) ## 支持的功能特性 @@ -56,7 +56,7 @@ REST 连接支持所有能运行 Go 的平台。 ### 安装前准备 * 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上) -* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../connector#安装客户端驱动) +* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) 配置好环境变量,检查命令: diff --git a/docs/zh/08-connector/rust.mdx b/docs/zh/08-connector/06-rust.mdx similarity index 99% rename from docs/zh/08-connector/rust.mdx rename to docs/zh/08-connector/06-rust.mdx index ddb0885f9d..187e2f0b33 100644 --- a/docs/zh/08-connector/rust.mdx +++ b/docs/zh/08-connector/06-rust.mdx @@ -28,7 +28,7 @@ Websocket 连接支持所有能运行 Rust 的平台。 ## 版本支持 -请参考[版本支持列表](../connector#版本支持) +请参考[版本支持列表](../#版本支持) Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。 @@ -37,7 +37,7 @@ Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容 ### 安装前准备 * 安装 Rust 开发工具链 -* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../connector#安装客户端驱动) +* 
如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) ### 添加 taos 依赖 diff --git a/docs/zh/08-connector/python.mdx b/docs/zh/08-connector/07-python.mdx similarity index 96% rename from docs/zh/08-connector/python.mdx rename to docs/zh/08-connector/07-python.mdx index 46b2bf89a2..88a5d4f84d 100644 --- a/docs/zh/08-connector/python.mdx +++ b/docs/zh/08-connector/07-python.mdx @@ -8,7 +8,7 @@ description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了 import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../connector/cpp)和 [REST 接口](/reference/rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。 +`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。 除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。 使用客户端驱动提供的原生接口直接与服务端建立的连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立的连接的方式下文中称为“REST 连接”。 @@ -17,7 +17,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con ## 支持的平台 -- 原生连接[支持的平台](../connector/#支持的平台)和 TDengine 客户端支持的平台一致。 +- 原生连接[支持的平台](../#支持的平台)和 TDengine 客户端支持的平台一致。 - REST 连接支持所有能运行 Python 的平台。 ## 版本选择 @@ -275,7 +275,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ##### RestClient 类的使用 -`RestClient` 类是对于 [REST API](/reference/rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 +`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 ```python title="RestClient 的使用" {{#include docs/examples/python/rest_client_example.py}} diff --git a/docs/zh/08-connector/node.mdx b/docs/zh/08-connector/08-node.mdx similarity index 97% rename from docs/zh/08-connector/node.mdx rename to docs/zh/08-connector/08-node.mdx index c4004a5f59..63d690e554 100644 --- a/docs/zh/08-connector/node.mdx +++ b/docs/zh/08-connector/08-node.mdx @@ -28,7 +28,7 @@ REST 连接器支持所有能运行 Node.js 的平台。 ## 版本支持 -请参考[版本支持列表](../connector#版本支持) +请参考[版本支持列表](../#版本支持) ## 支持的功能特性 @@ -52,7 +52,7 @@ REST 连接器支持所有能运行 Node.js 的平台。 ### 安装前准备 - 安装 Node.js 开发环境 -- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../connector#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。 +- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。 diff --git a/docs/zh/08-connector/csharp.mdx b/docs/zh/08-connector/09-csharp.mdx similarity index 97% rename from docs/zh/08-connector/csharp.mdx rename to docs/zh/08-connector/09-csharp.mdx index f4aa30c310..8214717583 100644 --- a/docs/zh/08-connector/csharp.mdx +++ b/docs/zh/08-connector/09-csharp.mdx @@ -18,7 +18,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。 +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST 
API](../rest-api/) 文档自行编写。 本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 @@ -32,7 +32,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" ## 版本支持 -请参考[版本支持列表](../connector#版本支持) +请参考[版本支持列表](../#版本支持) ## 支持的功能特性 @@ -49,7 +49,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" * 安装 [.NET SDK](https://dotnet.microsoft.com/download) * [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) -* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../connector#安装客户端驱动) +* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) ### 使用 dotnet CLI 安装 diff --git a/docs/zh/08-connector/php.mdx b/docs/zh/08-connector/10-php.mdx similarity index 97% rename from docs/zh/08-connector/php.mdx rename to docs/zh/08-connector/10-php.mdx index 5c7525842a..53611c0274 100644 --- a/docs/zh/08-connector/php.mdx +++ b/docs/zh/08-connector/10-php.mdx @@ -38,7 +38,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一 ### 安装 TDengine 客户端驱动 -TDengine 客户端驱动的安装请参考 [安装指南](../connector#安装步骤) +TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤) ### 编译安装 php-tdengine diff --git a/docs/zh/08-connector/03-connector.mdx b/docs/zh/08-connector/index.md similarity index 98% rename from docs/zh/08-connector/03-connector.mdx rename to docs/zh/08-connector/index.md index bdad0b7e25..17de8e926c 100644 --- a/docs/zh/08-connector/03-connector.mdx +++ b/docs/zh/08-connector/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 连接器 title: 连接器 +description: 详细介绍各种语言的连接器及 REST API --- TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md index a967299e40..28f52be59a 100644 --- a/docs/zh/12-taos-sql/14-stream.md +++ b/docs/zh/12-taos-sql/14-stream.md @@ -18,7 +18,7 @@ stream_options: { 其中 subquery 是 select 普通查询语法的子集: ```sql -subquery: SELECT [DISTINCT] select_list +subquery: SELECT select_list from_clause [WHERE condition] [PARTITION BY tag_list] @@ -37,7 +37,7 @@ window_clause: { 其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。 -窗口的定义与时序数据特色查询中的定义完全相同。 +窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished) 例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。 diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md index 6177b52e4c..71bf5f4223 100644 --- a/docs/zh/14-reference/04-taosadapter.md +++ b/docs/zh/14-reference/04-taosadapter.md @@ -156,7 +156,7 @@ AllowWebSockets ## 功能列表 - RESTful 接口 - [https://docs.taosdata.com/reference/rest-api/](https://docs.taosdata.com/reference/rest-api/) + [RESTful API](../../connector/rest-api) - 兼容 InfluxDB v1 写接口 [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - 兼容 OpenTSDB JSON 和 telnet 格式写入 @@ -179,7 +179,7 @@ AllowWebSockets ### TDengine RESTful 接口 -您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/rest-api/)。 +您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](../../connector/rest-api/)。 ### InfluxDB diff --git a/docs/zh/20-third-party/09-emq-broker.md 
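The taosAdapter section above notes that any HTTP-capable client can write to or query TDengine through the `/rest/sql` endpoint. As an illustration only (not part of the patch), the sketch below posts a single SQL statement with libcurl; the `127.0.0.1:6041` address and the `Basic cm9vdDp0YW9zZGF0YQ==` token for the default root/taosdata account follow the EMQX guide further down, the response JSON layout is described in the REST API reference, and the program needs to be linked with `-lcurl`.

```c
#include <curl/curl.h>
#include <stdio.h>

int main(void) {
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  // Basic auth for the default root/taosdata account (same token as in the EMQX guide).
  struct curl_slist *hdrs = curl_slist_append(NULL, "Authorization: Basic cm9vdDp0YW9zZGF0YQ==");

  curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases");  // request body is one SQL statement

  CURLcode rc = curl_easy_perform(curl);  // response JSON goes to stdout by default
  if (rc != CURLE_OK) {
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
  }

  curl_slist_free_all(hdrs);
  curl_easy_cleanup(curl);
  return rc == CURLE_OK ? 0 : 1;
}
```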
b/docs/zh/20-third-party/09-emq-broker.md index dd98374558..f252e520a7 100644 --- a/docs/zh/20-third-party/09-emq-broker.md +++ b/docs/zh/20-third-party/09-emq-broker.md @@ -90,7 +90,7 @@ http://127.0.0.1:6041/rest/sql ``` Basic cm9vdDp0YW9zZGF0YQ== ``` -相关文档请参考[ TDengine REST API 文档](/reference/rest-api/)。 +相关文档请参考[ TDengine REST API 文档](../../connector/rest-api/)。 在消息体中输入规则引擎替换模板: diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index a64798caa0..1e97572ca4 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -9,7 +9,7 @@ import Release from "/components/ReleaseV3"; -## 3.0.0.0 + diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c index 2fcf4dd62c..55556f21a1 100644 --- a/examples/c/stream_demo.c +++ b/examples/c/stream_demo.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +// clang-format off #include #include #include @@ -94,13 +95,8 @@ int32_t create_stream() { } taos_free_result(pRes); - /*const char* sql = "select min(k), max(k), sum(k) from tu1";*/ - /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/ - /*const char* sql = "select sum(k) from tu1 interval(10m)";*/ - /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/ pRes = taos_query(pConn, - "create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, " - "count(k) from st1 partition by tbname interval(20s) "); + "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, k from st1 partition by tbname state_window(k)"); if (taos_errno(pRes) != 0) { printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index a64815f14f..1ce88905c2 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -29,7 +29,7 @@ typedef void* DataSinkHandle; struct SRpcMsg; struct SSubplan; -typedef struct SReadHandle { +typedef struct { void* tqReader; void* meta; void* config; @@ -41,6 +41,7 @@ typedef struct SReadHandle { bool initTableReader; bool initTqReader; int32_t numOfVgroups; + void* pStateBackend; } SReadHandle; // in queue mode, data streams are seperated by msg @@ -78,8 +79,8 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO /** * @brief Cleanup SSDataBlock for StreamScanInfo - * - * @param tinfo + * + * @param tinfo */ void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo); @@ -163,7 +164,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet); -int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/); +int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/); int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 384c6a289f..16b259cf59 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -263,6 +263,14 @@ typedef struct { SArray* checkpointVer; } SStreamRecoveringState; +// incremental state storage +typedef struct { + SStreamTask* pOwner; + TDB* db; + TTB* pStateDb; + TXN txn; +} SStreamState; + typedef struct SStreamTask { int64_t streamId; int32_t taskId; @@ -312,6 +320,10 @@ typedef struct SStreamTask { // msg 
handle SMsgCb* pMsgCb; + + // state backend + SStreamState* pState; + } SStreamTask; int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo); @@ -507,7 +519,7 @@ typedef struct SStreamMeta { char* path; TDB* db; TTB* pTaskDb; - TTB* pStateDb; + TTB* pCheckpointDb; SHashObj* pTasks; SHashObj* pRecoverStatus; void* ahandle; @@ -528,6 +540,36 @@ int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); int32_t streamLoadTasks(SStreamMeta* pMeta); +SStreamState* streamStateOpen(char* path, SStreamTask* pTask); +void streamStateClose(SStreamState* pState); +int32_t streamStateBegin(SStreamState* pState); +int32_t streamStateCommit(SStreamState* pState); +int32_t streamStateAbort(SStreamState* pState); + +typedef struct { + TBC* pCur; +} SStreamStateCur; + +#if 1 +int32_t streamStatePut(SStreamState* pState, const void* key, int32_t kLen, const void* value, int32_t vLen); +int32_t streamStateGet(SStreamState* pState, const void* key, int32_t kLen, void** pVal, int32_t* pVLen); +int32_t streamStateDel(SStreamState* pState, const void* key, int32_t kLen); + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const void* key, int32_t kLen); +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const void* key, int32_t kLen); +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const void* key, int32_t kLen); +void streamStateFreeCur(SStreamStateCur* pCur); + +int32_t streamGetKVByCur(SStreamStateCur* pCur, void** pKey, int32_t* pKLen, void** pVal, int32_t* pVLen); + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); + +#endif + #ifdef __cplusplus } #endif diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index f275ae0885..855dfb15ee 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -96,7 +96,12 @@ typedef struct { typedef struct SQueryExecMetric { int64_t start; // start timestamp, us - int64_t parsed; // start to parse, us + int64_t syntaxStart; // start to parse, us + int64_t syntaxEnd; // end to parse, us + int64_t ctgStart; // start to parse, us + int64_t ctgEnd; // end to parse, us + int64_t semanticEnd; + int64_t execEnd; int64_t send; // start to send to server, us int64_t rsp; // receive response from server, us } SQueryExecMetric; diff --git a/source/client/inc/clientLog.h b/source/client/inc/clientLog.h index d47edcd795..ec0a41a68f 100644 --- a/source/client/inc/clientLog.h +++ b/source/client/inc/clientLog.h @@ -29,6 +29,7 @@ extern "C" { #define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0) #define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0) #define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0) +#define tscPerf(...) 
do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0) #ifdef __cplusplus } diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index ff1b9322c9..ae92d2dc7c 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -69,14 +69,25 @@ static void deregisterRequest(SRequestObj *pRequest) { int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1); int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1); - int64_t duration = taosGetTimestampUs() - pRequest->metric.start; + int64_t nowUs = taosGetTimestampUs(); + int64_t duration = nowUs - pRequest->metric.start; tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64 " ms, current:%d, app current:%d", pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst); if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) { + tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, + pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + pRequest->metric.execEnd - pRequest->metric.semanticEnd); atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { + tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, + pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + pRequest->metric.execEnd - pRequest->metric.semanticEnd); atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); } @@ -330,7 +341,6 @@ void doDestroyRequest(void *p) { schedulerFreeJob(&pRequest->body.queryJob, 0); taosMemoryFreeClear(pRequest->msgBuf); - taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFreeClear(pRequest->pDb); doFreeReqResultInfo(&pRequest->body.resInfo); @@ -349,6 +359,7 @@ void doDestroyRequest(void *p) { taosMemoryFree(pRequest->body.param); } + taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFree(pRequest); tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 5f0af55d13..998b9cee5c 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -842,6 +842,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { } schedulerFreeJob(&pRequest->body.queryJob, 0); + + pRequest->metric.execEnd = taosGetTimestampUs(); } taosMemoryFree(pResult); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index f449641f10..31ae443d5b 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -685,6 +685,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { SQuery *pQuery = pWrapper->pQuery; SRequestObj *pRequest = pWrapper->pRequest; + pRequest->metric.ctgEnd = taosGetTimestampUs(); + if (code == TSDB_CODE_SUCCESS) { code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery); pRequest->stableQuery = pQuery->stableQuery; @@ -693,6 +695,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { } } + pRequest->metric.semanticEnd = taosGetTimestampUs(); + if (code == 
TSDB_CODE_SUCCESS) { if (pQuery->haveResultSet) { setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols); @@ -784,12 +788,16 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SQuery *pQuery = NULL; + pRequest->metric.syntaxStart = taosGetTimestampUs(); + SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)}; code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq); if (code != TSDB_CODE_SUCCESS) { goto _error; } + pRequest->metric.syntaxEnd = taosGetTimestampUs(); + if (!updateMetaForce) { STscObj *pTscObj = pRequest->pTscObj; SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; @@ -816,6 +824,8 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { .requestObjRefId = pCxt->requestRid, .mgmtEps = pCxt->mgmtEpSet}; + pRequest->metric.ctgStart = taosGetTimestampUs(); + code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper, &pRequest->body.queryJob); pCxt = NULL; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 31738f3c28..bb2729c776 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -77,7 +77,7 @@ bool tsMonitorComp = false; // telem bool tsEnableTelem = true; -int32_t tsTelemInterval = 86400; +int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com"; uint16_t tsTelemPort = 80; diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index 27814fe5be..93f7531a27 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ b/source/dnode/mnode/impl/src/mndTelem.c @@ -131,7 +131,9 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) { char* pCont = mndBuildTelemetryReport(pMnode); if (pCont != NULL) { if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) { - mError("failed to send telemetry msg"); + mError("failed to send telemetry report"); + } else { + mTrace("succeed to send telemetry report"); } taosMemoryFree(pCont); } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 17b4336465..c77a80cc82 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1308,7 +1308,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { if (pTrans->policy == TRN_POLICY_ROLLBACK) { if (pTrans->lastAction != 0) { STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->lastAction); - if (pAction->retryCode != 0 && pAction->retryCode != pAction->errCode) { + if (pAction->retryCode != 0 && pAction->retryCode == pAction->errCode) { if (pTrans->failedTimes < 6) { mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id, pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes); diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt index 3f9ec123a8..a55b45ca11 100644 --- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME smaTest - COMMAND smaTest -) +if(NOT ${TD_WINDOWS}) + add_test( + NAME smaTest + COMMAND smaTest + ) +endif(NOT ${TD_WINDOWS}) diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt index dcfbe658fc..e3a3fc2e79 100644 --- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt 
+++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME stbTest - COMMAND stbTest -) \ No newline at end of file +if(NOT ${TD_WINDOWS}) + add_test( + NAME stbTest + COMMAND stbTest + ) +endif(NOT ${TD_WINDOWS}) \ No newline at end of file diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index fabfcd93f1..02772b4cd0 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -734,10 +734,12 @@ static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inpu SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); - tsem_post(&(pRSmaStat->notEmpty)); - int64_t nItems = atomic_fetch_add_64(&pRSmaStat->nBufItems, 1); + if (atomic_load_8(&pInfo->assigned) == 0) { + tsem_post(&(pRSmaStat->notEmpty)); + } + // smoothing consume int32_t n = nItems / RSMA_QTASKEXEC_SMOOTH_SIZE; if (n > 1) { @@ -1526,7 +1528,9 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { ASSERT(qItem->level == pItem->level); ASSERT(qItem->fetchLevel == pItem->fetchLevel); #endif - tsem_post(&(pStat->notEmpty)); + if (atomic_load_8(&pRSmaInfo->assigned) == 0) { + tsem_post(&(pStat->notEmpty)); + } smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level, pRSmaInfo->suid); } break; @@ -1691,38 +1695,48 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { while (true) { // step 1: rsma exec - consume data in buffer queue for all suids if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) { - void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock - while (pIter) { + void *pIter = NULL; + while ((pIter = taosHashIterate(infoHash, pIter))) { SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; - int64_t itemSize = 0; - if ((itemSize = taosQueueItemSize(pInfo->queue)) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || - RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { - if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { - taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock - int32_t qallItemSize = taosQallItemSize(pInfo->qall); - if (qallItemSize > 0) { - tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); - smaDebug("vgId:%d, qitemSize:%" PRIi64 ", batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), itemSize, - qallItemSize, type); + if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { + if ((taosQueueItemSize(pInfo->queue) > 0) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || + RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { + int32_t batchCnt = -1; + int32_t batchMax = taosHashGetSize(infoHash) / tsNumOfVnodeRsmaThreads; + bool occupied = (batchMax <= 1); + if (batchMax > 1) { + batchMax = 100 / batchMax; } + while (occupied || (++batchCnt > batchMax)) { // greedy mode + taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock + int32_t qallItemSize = taosQallItemSize(pInfo->qall); + if (qallItemSize > 0) { + tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); + smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type); + } - if (type == RSMA_EXEC_OVERFLOW) { - tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); - } + if (type == RSMA_EXEC_OVERFLOW) { + tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); + } - if (qallItemSize > 0) { - // subtract the item size after the task finished, commit should wait for all items be consumed - atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); + if (qallItemSize > 0) { + 
atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); + continue; + } else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { + continue; + } + + break; } - ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); } + ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); } - pIter = taosHashIterate(infoHash, pIter); } if (type == RSMA_EXEC_COMMIT) { if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { break; } else { + // commit should wait for all items be consumed continue; } } @@ -1761,15 +1775,16 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { if (pEnv->flag & SMA_ENV_FLG_CLOSE) { break; } + + tsem_wait(&pRSmaStat->notEmpty); + + if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) { + smaInfo("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag, + atomic_load_64(&pRSmaStat->nBufItems)); + break; + } } - tsem_wait(&pRSmaStat->notEmpty); - - if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) { - smaInfo("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag, - atomic_load_64(&pRSmaStat->nBufItems)); - break; - } } // end of while(true) _end: diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c6bc8e6e59..1456c6c067 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -79,6 +79,10 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { ASSERT(0); } + if (streamLoadTasks(pTq->pStreamMeta) < 0) { + ASSERT(0); + } + return pTq; } @@ -664,6 +668,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { ASSERT(pTask->exec.executor); } + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } + // sink /*pTask->ahandle = pTq->pVnode;*/ if (pTask->outputType == TASK_OUTPUT__SMA) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index cb288cecfb..0b027367da 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1416,7 +1416,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf int64_t minKey = 0; if (pReader->order == TSDB_ORDER_ASC) { minKey = INT64_MAX; // chosen the minimum value - if (minKey > tsLast && pLastBlockReader->lastBlockData.nRow > 0) { + if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) { minKey = tsLast; } @@ -1429,7 +1429,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf } } else { minKey = INT64_MIN; - if (minKey < tsLast && pLastBlockReader->lastBlockData.nRow > 0) { + if (minKey < tsLast && hasDataInLastBlock(pLastBlockReader)) { minKey = tsLast; } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 601c22a3ba..67f7cb2f6f 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -150,6 +150,7 @@ typedef struct { SQueryTableDataCond tableCond; int64_t recoverStartVer; int64_t recoverEndVer; + SStreamState* pState; } SStreamTaskInfo; typedef struct { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 4c694026cb..197d94dcf4 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -469,6 +469,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray SDataType type = {.type = 
TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; code = createResultData(&type, rows, &output); if (code != TSDB_CODE_SUCCESS) { + terrno = code; qError("failed to create result, reason:%s", tstrerror(code)); goto end; } @@ -477,6 +478,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray if(code != TSDB_CODE_SUCCESS){ qError("failed to calculate scalar, reason:%s", tstrerror(code)); terrno = code; + goto end; } // int64_t st2 = taosGetTimestampUs(); // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); @@ -763,6 +765,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } if (pTagCond) { + terrno = TDB_CODE_SUCCESS; SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond); if(terrno != TDB_CODE_SUCCESS){ colDataDestroy(pColInfoData); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 1836ca6d9b..98c7c56d72 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -392,7 +392,7 @@ static void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { static void functionCtxRestore(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { pCtx->input.colDataAggIsSet = pStatus->hasAgg; - pCtx->input.numOfRows = pStatus->numOfRows; + pCtx->input.numOfRows = pStatus->numOfRows; pCtx->input.startRowIndex = pStatus->startOffset; } @@ -3715,7 +3715,7 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t const char* id, SInterval* pInterval, int32_t fillType, int32_t order) { SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode); - int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey; + int64_t startKey = (order == TSDB_ORDER_ASC) ? 
win.skey : win.ekey; STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, startKey); w = getFirstQualifiedTimeWindow(startKey, &w, pInterval, order); @@ -3988,15 +3988,15 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, bool assignUid = groupbyTbname(group); - size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); + size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); - if(assignUid){ + if (assignUid) { for (int32_t i = 0; i < numOfTables; i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); info->groupId = info->uid; taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); } - }else{ + } else { int32_t code = getColInfoResultForGroupby(pHandle->meta, group, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { return code; @@ -4615,6 +4615,10 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } + if (pHandle && pHandle->pStateBackend) { + (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend; + } + (*pTaskInfo)->sql = sql; sql = NULL; (*pTaskInfo)->pSubplan = pPlan; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 551180f639..3d54b791a8 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3128,8 +3128,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { maxTs = TMAX(maxTs, pBlock->info.window.ekey); maxTs = TMAX(maxTs, pBlock->info.watermark); - if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA || - pBlock->info.type == STREAM_INVALID) { + ASSERT(pBlock->info.type != STREAM_INVERT); + if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) { pInfo->binfo.pRes->info.type = pBlock->info.type; } else if (pBlock->info.type == STREAM_CLEAR) { SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index ce841ed83c..957fd46ba5 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -283,7 +283,7 @@ typedef struct SSchJob { } SSchJob; typedef struct SSchTaskCtx { - SSchJob *pJob; + int64_t jobRid; SSchTask *pTask; } SSchTaskCtx; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index d16d15c119..9cab39c301 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -821,7 +821,13 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) { int32_t schLaunchTaskImpl(void *param) { SSchTaskCtx *pCtx = (SSchTaskCtx *)param; - SSchJob *pJob = pCtx->pJob; + SSchJob *pJob = schAcquireJob(pCtx->jobRid); + if (NULL == pJob) { + taosMemoryFree(param); + qDebug("job refId 0x%" PRIx64 " already not exist", pCtx->jobRid); + SCH_RET(TSDB_CODE_SCH_JOB_IS_DROPPING); + } + SSchTask *pTask = pCtx->pTask; int8_t status = 0; int32_t code = 0; @@ -880,6 +886,8 @@ _return: } } + schReleaseJob(pJob->refId); + SCH_RET(code); } @@ -890,7 +898,7 @@ int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - param->pJob = pJob; + param->jobRid = pJob->refId; param->pTask = pTask; if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 06ca26f029..102bad7426 100644 --- 
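The scheduler change above replaces the raw `SSchJob*` held in `SSchTaskCtx` with the job's ref id: `schLaunchTaskImpl` now re-acquires the job through `schAcquireJob(pCtx->jobRid)`, returns early if the job has already been dropped, and releases it on exit. A minimal sketch of that pattern, using hypothetical names rather than the real scheduler APIs, looks like this:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Hypothetical stand-ins for schAcquireJob/schReleaseJob: resolve an id to an object
// and pin it, so a task that runs late never dereferences a freed job pointer.
typedef struct {
  int64_t id;
  int     refs;
  int     dropped;
} Job;

static Job g_job = {42, 1, 0};

static Job *acquireJob(int64_t id) {
  if (id != g_job.id || g_job.dropped) return NULL;  // job no longer exists
  g_job.refs++;
  return &g_job;
}

static void releaseJob(Job *pJob) {
  if (pJob != NULL) pJob->refs--;
}

typedef struct {
  int64_t jobId;  // mirrors SSchTaskCtx.jobRid: store the id, never the pointer
} TaskCtx;

static int launchTask(void *param) {
  TaskCtx *ctx = param;
  Job     *job = acquireJob(ctx->jobId);  // re-acquire by id at execution time
  if (job == NULL) {                      // job dropped before the async task ran
    free(ctx);
    return -1;
  }
  printf("run task for job %" PRId64 "\n", job->id);
  releaseJob(job);
  free(ctx);
  return 0;
}

int main(void) {
  TaskCtx *ctx = malloc(sizeof(*ctx));
  if (ctx == NULL) return 1;
  ctx->jobId = 42;
  return launchTask(ctx);
}
```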
a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -140,7 +140,6 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch) return 0; } -// TODO: handle version int32_t streamExecForAll(SStreamTask* pTask) { while (1) { int32_t batchCnt = 1; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 5ff700546c..20a2f7d332 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -14,7 +14,7 @@ */ #include "executor.h" -#include "tstream.h" +#include "streamInc.h" #include "ttimer.h" SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) { @@ -23,17 +23,23 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - pMeta->path = strdup(path); + int32_t len = strlen(path) + 20; + char* streamPath = taosMemoryCalloc(1, len); + sprintf(streamPath, "%s/%s", path, "stream"); + pMeta->path = strdup(streamPath); if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db) < 0) { goto _err; } + sprintf(streamPath, "%s/%s", pMeta->path, "checkpoints"); + mkdir(streamPath, 0755); + taosMemoryFree(streamPath); + if (tdbTbOpen("task.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pTaskDb) < 0) { goto _err; } - // open state storage backend - if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pStateDb) < 0) { + if (tdbTbOpen("checkpoint.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pCheckpointDb) < 0) { goto _err; } @@ -49,16 +55,13 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF pMeta->ahandle = ahandle; pMeta->expandFunc = expandFunc; - if (streamLoadTasks(pMeta) < 0) { - goto _err; - } return pMeta; _err: if (pMeta->path) taosMemoryFree(pMeta->path); if (pMeta->pTasks) taosHashCleanup(pMeta->pTasks); - if (pMeta->pStateDb) tdbTbClose(pMeta->pStateDb); if (pMeta->pTaskDb) tdbTbClose(pMeta->pTaskDb); + if (pMeta->pCheckpointDb) tdbTbClose(pMeta->pCheckpointDb); if (pMeta->db) tdbClose(pMeta->db); taosMemoryFree(pMeta); return NULL; @@ -67,7 +70,7 @@ _err: void streamMetaClose(SStreamMeta* pMeta) { tdbCommit(pMeta->db, &pMeta->txn); tdbTbClose(pMeta->pTaskDb); - tdbTbClose(pMeta->pStateDb); + tdbTbClose(pMeta->pCheckpointDb); tdbClose(pMeta->db); void* pIter = NULL; diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 263053778b..0505c3edd6 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -176,6 +176,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea } int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 0 void* buf = NULL; ASSERT(pTask->taskLevel == TASK_LEVEL__SINK); @@ -224,10 +225,12 @@ int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { FAIL: if (buf) taosMemoryFree(buf); return -1; +#endif return 0; } int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 0 void* pVal = NULL; int32_t vLen = 0; if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) { @@ -241,7 +244,7 @@ int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { pTask->nextCheckId = aggCheckpoint.checkpointId + 1; pTask->checkpointInfo = aggCheckpoint.checkpointVer; - +#endif return 0; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c new file mode 100644 index 0000000000..6ccc90fa51 --- 
/dev/null +++ b/source/libs/stream/src/streamState.c @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "executor.h" +#include "streamInc.h" +#include "ttimer.h" + +SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { + SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); + if (pState == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + char statePath[200]; + sprintf(statePath, "%s/%d", path, pTask->taskId); + if (tdbOpen(statePath, 16 * 1024, 1, &pState->db) < 0) { + goto _err; + } + + // open state storage backend + if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pState->db, &pState->pStateDb) < 0) { + goto _err; + } + + pState->pOwner = pTask; + + return pState; + +_err: + if (pState->pStateDb) tdbTbClose(pState->pStateDb); + if (pState->db) tdbClose(pState->db); + taosMemoryFree(pState); + return NULL; +} + +void streamStateClose(SStreamState* pState) { + tdbCommit(pState->db, &pState->txn); + tdbTbClose(pState->pStateDb); + tdbClose(pState->db); + + taosMemoryFree(pState); +} + +int32_t streamStateBegin(SStreamState* pState) { + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStateCommit(SStreamState* pState) { + if (tdbCommit(pState->db, &pState->txn) < 0) { + return -1; + } + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStateAbort(SStreamState* pState) { + if (tdbAbort(pState->db, &pState->txn) < 0) { + return -1; + } + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStatePut(SStreamState* pState, const void* key, int32_t kLen, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pStateDb, key, kLen, value, vLen, &pState->txn); +} +int32_t streamStateGet(SStreamState* pState, const void* key, int32_t kLen, void** pVal, int32_t* pVLen) { + return tdbTbGet(pState->pStateDb, key, kLen, pVal, pVLen); +} + +int32_t streamStateDel(SStreamState* pState, const void* key, int32_t kLen) { + return tdbTbDelete(pState->pStateDb, key, kLen, &pState->txn); +} + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const void* key, int32_t kLen) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) return NULL; + tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL); + + int32_t c; + tdbTbcMoveTo(pCur->pCur, key, kLen, &c); + if (c != 0) { + taosMemoryFree(pCur); + return NULL; + } + return 0; +} + +int32_t 
streamGetKVByCur(SStreamStateCur* pCur, void** pKey, int32_t* pKLen, void** pVal, int32_t* pVLen) { + return tdbTbcGet(pCur->pCur, (const void**)pKey, pKLen, (const void**)pVal, pVLen); +} + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToFirst(pCur->pCur); +} + +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToLast(pCur->pCur); +} + +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const void* key, int32_t kLen) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, kLen, &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c > 0) return pCur; + + if (tdbTbcMoveToNext(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const void* key, int32_t kLen) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, kLen, &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c < 0) return pCur; + + if (tdbTbcMoveToPrev(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToNext(pCur->pCur); +} + +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToPrev(pCur->pCur); +} diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 4009a47c65..ce5917de29 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -165,5 +165,8 @@ void tFreeSStreamTask(SStreamTask* pTask) { if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos); } + + if (pTask->pState) streamStateClose(pTask->pState); + taosMemoryFree(pTask); } diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 447db76136..6dd9481b95 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -276,7 +276,7 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { while (transReadComplete(pBuf)) { tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); if (true == pBuf->invalid || false == uvHandleReq(conn)) { - tError("%s conn %p read invalid packet", transLabel(pTransInst), conn); + tError("%s conn %p read invalid packet, dst: %s, srv: %s", transLabel(pTransInst), conn, conn->dst, conn->src); destroyConn(conn, true); return; } diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index a8da680910..93ced912f8 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -121,7 +121,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) { if (found == NULL) { // file corrupted, no complete log // TODO delete and search in previous files - ASSERT(0); + /*ASSERT(0);*/ terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } @@ -221,7 +221,6 @@ int walCheckAndRepairMeta(SWal* pWal) { int code = walSaveMeta(pWal); if (code < 0) { - taosArrayDestroy(actualLog); return -1; } } diff --git a/tests/script/tsim/db/basic2.sim b/tests/script/tsim/db/basic2.sim index b7ac0b5edd..4f0ba4a13c 100644 --- a/tests/script/tsim/db/basic2.sim +++ 
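Combined with the declarations added to `include/libs/stream/tstream.h` earlier in this series, the new per-task state store is intended to be used roughly as sketched below. This is a hedged example against the generic key/kLen signatures introduced here (a later commit in the series narrows them to `SWinKey`); the helper names are invented for illustration, error handling is trimmed, and it is not compilable outside the TDengine source tree.

```c
#include "tstream.h"  // SStreamState API introduced by this patch

// Illustrative helpers only: persist and reload one window's running sum for a task.
// pTask->pState is opened via streamStateOpen() in the tq.c change earlier in this series.
static int32_t saveWindowSum(SStreamTask *pTask, int64_t winStart, double sum) {
  if (streamStatePut(pTask->pState, &winStart, sizeof(winStart), &sum, sizeof(sum)) < 0) {
    return -1;
  }
  // Commit makes the write durable; the implementation shown above immediately begins a new txn.
  return streamStateCommit(pTask->pState);
}

static int32_t loadWindowSum(SStreamTask *pTask, int64_t winStart, double *pSum) {
  void   *pVal = NULL;
  int32_t vLen = 0;
  if (streamStateGet(pTask->pState, &winStart, sizeof(winStart), &pVal, &vLen) < 0) {
    return -1;  // key not present in the state backend
  }
  *pSum = *(double *)pVal;  // vLen is expected to be sizeof(double) for this key
  tdbFree(pVal);            // value buffer is allocated by the TDB layer (streamFreeVal later wraps this)
  return 0;
}
```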
b/tests/script/tsim/db/basic2.sim @@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== conflict stb -sql create database db vgroups 1; +sql create database db vgroups 4; sql use db; sql create table stb (ts timestamp, i int) tags (j int); sql_error create table stb using stb tags (1); @@ -16,6 +16,9 @@ sql_error create table ctb (ts timestamp, i int) tags (j int); sql create table ntb (ts timestamp, i int); sql_error create table ntb (ts timestamp, i int) tags (j int); +sql drop table ntb +sql create table ntb (ts timestamp, i int) tags (j int); + sql drop database db print =============== create database d1 From c7cb06e3962a7a00bbabfbe1905bff4a70632ff2 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 24 Aug 2022 17:54:04 +0800 Subject: [PATCH 30/79] feat: system table visible permission --- tests/script/tsim/user/privilege_sysinfo.sim | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim index 25c1a84db6..5a892cb2c4 100644 --- a/tests/script/tsim/user/privilege_sysinfo.sim +++ b/tests/script/tsim/user/privilege_sysinfo.sim @@ -51,13 +51,13 @@ sql select * from information_schema.ins_tables where db_name = 'db' print =============== check show sql select * from information_schema.ins_users sql_error show cluster -sql select * from information_schema.ins_dnodes -sql select * from information_schema.ins_mnodes +sql_error select * from information_schema.ins_dnodes +sql_error select * from information_schema.ins_mnodes sql_error show snodes -sql select * from information_schema.ins_qnodes +sql_error select * from information_schema.ins_qnodes sql_error show bnodes sql_error show grants sql_error show dnode 1 variables; -sql show variables; +sql_error show variables; system sh/exec.sh -n dnode1 -s stop -x SIGINT From 39cd95d825bf23440140eefa2b4217066aa63294 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 24 Aug 2022 18:15:37 +0800 Subject: [PATCH 31/79] fix: while condition --- source/dnode/vnode/src/sma/smaRollup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 02772b4cd0..426ab521fd 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -1707,7 +1707,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { if (batchMax > 1) { batchMax = 100 / batchMax; } - while (occupied || (++batchCnt > batchMax)) { // greedy mode + while (occupied || (++batchCnt < batchMax)) { // greedy mode taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock int32_t qallItemSize = taosQallItemSize(pInfo->qall); if (qallItemSize > 0) { From e75ed9f06d694762e0968026e7e2860f251bffac Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 24 Aug 2022 18:17:55 +0800 Subject: [PATCH 32/79] other: revert to 3.0 --- source/libs/stream/src/streamState.c | 251 ++++++++++++++++++--------- 1 file changed, 170 insertions(+), 81 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 1c9d11b755..dfd6f012cc 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -13,104 +13,193 @@ * along with this program. If not, see . 
*/ -// clang-format off -#include -#include -#include -#include -#include "taos.h" +#include "executor.h" +#include "streamInc.h" +#include "tcommon.h" +#include "ttimer.h" -int32_t init_env() { - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { +SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { + SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); + if (pState == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + char statePath[300]; + sprintf(statePath, "%s/%d", path, pTask->taskId); + if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) { + goto _err; + } + + // open state storage backend + if (tdbTbOpen("state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pStateDb) < 0) { + goto _err; + } + + if (streamStateBegin(pState) < 0) { + goto _err; + } + + pState->pOwner = pTask; + + return pState; + +_err: + if (pState->pStateDb) tdbTbClose(pState->pStateDb); + if (pState->db) tdbClose(pState->db); + taosMemoryFree(pState); + return NULL; +} + +void streamStateClose(SStreamState* pState) { + tdbCommit(pState->db, &pState->txn); + tdbTbClose(pState->pStateDb); + tdbClose(pState->db); + + taosMemoryFree(pState); +} + +int32_t streamStateBegin(SStreamState* pState) { + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { return -1; } - TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2"); - if (taos_errno(pRes) != 0) { - printf("error in create db, reason:%s\n", taos_errstr(pRes)); + if (tdbBegin(pState->db, &pState->txn) < 0) { + tdbTxnClose(&pState->txn); return -1; } - taos_free_result(pRes); - -#if 0 - pRes = taos_query(pConn, "create database if not exists abc2 vgroups 20"); - if (taos_errno(pRes) != 0) { - printf("error in create db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); -#endif - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create stable if not exists st1 (ts timestamp, k int) tags(a int)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists tu1 using st1 tags(1)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists tu2 using st1 tags(2)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table tu2, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists tu3 using st1 tags(3)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - return 0; } -int32_t create_stream() { - printf("create stream\n"); - TAOS_RES* pRes; - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { +int32_t streamStateCommit(SStreamState* pState) { + if (tdbCommit(pState->db, &pState->txn) < 0) { return -1; } - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); + memset(&pState->txn, 0, sizeof(TXN)); + if 
(tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { return -1; } - taos_free_result(pRes); - - pRes = taos_query(pConn, - "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, avg(k) from st1 partition by tbname interval(10s)"); - if (taos_errno(pRes) != 0) { - printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); + if (tdbBegin(pState->db, &pState->txn) < 0) { return -1; } - taos_free_result(pRes); - taos_close(pConn); return 0; } -int main(int argc, char* argv[]) { - int code; - if (argc > 1) { - printf("env init\n"); - code = init_env(); +int32_t streamStateAbort(SStreamState* pState) { + if (tdbAbort(pState->db, &pState->txn) < 0) { + return -1; } - create_stream(); + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; } + +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn); +} +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { + return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen); +} + +int32_t streamStateDel(SStreamState* pState, const SWinKey* key) { + return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn); +} + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) return NULL; + tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL); + + int32_t c; + tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c); + if (c != 0) { + taosMemoryFree(pCur); + return NULL; + } + return pCur; +} + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { + const SWinKey* pKTmp = NULL; + int32_t kLen; + if (tdbTbcGet(pCur->pCur, (const void**)&pKTmp, &kLen, pVal, pVLen) < 0) { + return -1; + } + *pKey = *pKTmp; + return 0; +} + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToFirst(pCur->pCur); +} + +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToLast(pCur->pCur); +} + +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c > 0) return pCur; + + if (tdbTbcMoveToNext(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c < 0) return pCur; + + if (tdbTbcMoveToPrev(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToNext(pCur->pCur); +} + +int32_t 
streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToPrev(pCur->pCur); +} +void streamStateFreeCur(SStreamStateCur* pCur) { + tdbTbcClose(pCur->pCur); + taosMemoryFree(pCur); +} + +void streamFreeVal(void* val) { tdbFree(val); } From 23098a08cf2bb3152bd2085eb281b251c2bbef86 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 24 Aug 2022 18:18:53 +0800 Subject: [PATCH 33/79] other: revert to 3.0 --- source/dnode/vnode/src/tq/tq.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 7440c03767..3ff59ac2c0 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -679,11 +679,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { ASSERT(pTask->exec.executor); } - pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); - if (pTask->pState == NULL) { - return -1; - } - // sink /*pTask->ahandle = pTq->pVnode;*/ if (pTask->outputType == TASK_OUTPUT__SMA) { From f6b39e5911de1701845d0d80a5be3735342b567f Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 24 Aug 2022 18:21:49 +0800 Subject: [PATCH 34/79] enh: add hash performance test --- include/util/thash.h | 2 + source/util/src/thash.c | 8 +- source/util/test/hashTest.cpp | 167 ++++++++++++++++++++++++++++++++++ 3 files changed, 176 insertions(+), 1 deletion(-) diff --git a/include/util/thash.h b/include/util/thash.h index 781c22a56a..f4d09eb090 100644 --- a/include/util/thash.h +++ b/include/util/thash.h @@ -210,6 +210,8 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp); */ void taosHashSetFreeFp(SHashObj *pHashObj, _hash_free_fn_t fp); +int64_t taosHashGetCompTimes(SHashObj *pHashObj); + #ifdef __cplusplus } #endif diff --git a/source/util/src/thash.c b/source/util/src/thash.c index aee84a0d55..0029f1ab1e 100644 --- a/source/util/src/thash.c +++ b/source/util/src/thash.c @@ -21,7 +21,7 @@ // the add ref count operation may trigger the warning if the reference count is greater than the MAX_WARNING_REF_COUNT #define MAX_WARNING_REF_COUNT 10000 -#define HASH_MAX_CAPACITY (1024 * 1024 * 16) +#define HASH_MAX_CAPACITY (1024 * 1024 * 1024) #define HASH_DEFAULT_LOAD_FACTOR (0.75) #define HASH_INDEX(v, c) ((v) & ((c)-1)) @@ -67,6 +67,7 @@ struct SHashObj { bool enableUpdate; // enable update SArray *pMemBlock; // memory block allocated for SHashEntry _hash_before_fn_t callbackFp; // function invoked before return the value to caller + int64_t compTimes; }; /* @@ -146,6 +147,7 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashObj *pHashObj, SHashEntr uint32_t hashVal) { SHashNode *pNode = pe->next; while (pNode) { + atomic_add_fetch_64(&pHashObj->compTimes, 1); if ((pNode->keyLen == keyLen) && ((*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0) && pNode->removed == 0) { assert(pNode->hashVal == hashVal); @@ -882,3 +884,7 @@ void *taosHashAcquire(SHashObj *pHashObj, const void *key, size_t keyLen) { } void taosHashRelease(SHashObj *pHashObj, void *p) { taosHashCancelIterate(pHashObj, p); } + +int64_t taosHashGetCompTimes(SHashObj *pHashObj) { return atomic_load_64(&pHashObj->compTimes); } + + diff --git a/source/util/test/hashTest.cpp b/source/util/test/hashTest.cpp index 99f5a761c5..5a426f9317 100644 --- a/source/util/test/hashTest.cpp +++ b/source/util/test/hashTest.cpp @@ -197,6 +197,172 @@ void acquireRleaseTest() { taosMemoryFreeClear(data.p); } +void perfTest() { + SHashObj* hash1h = (SHashObj*) taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), 
true, HASH_NO_LOCK); + SHashObj* hash1s = (SHashObj*) taosHashInit(1000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash10s = (SHashObj*) taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash100s = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash1m = (SHashObj*) taosHashInit(1000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash10m = (SHashObj*) taosHashInit(10000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash100m = (SHashObj*) taosHashInit(100000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + + char *name = (char*)taosMemoryCalloc(50000000, 9); + for (int64_t i = 0; i < 50000000; ++i) { + sprintf(name + i * 9, "t%08d", i); + } + + for (int64_t i = 0; i < 50; ++i) { + taosHashPut(hash1h, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 500; ++i) { + taosHashPut(hash1s, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 5000; ++i) { + taosHashPut(hash10s, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 50000; ++i) { + taosHashPut(hash100s, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 500000; ++i) { + taosHashPut(hash1m, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 5000000; ++i) { + taosHashPut(hash10m, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 50000000; ++i) { + taosHashPut(hash100m, name + i * 9, 9, &i, sizeof(i)); + } + + int64_t start1h = taosGetTimestampMs(); + int64_t start1hCt = taosHashGetCompTimes(hash1h); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash1h, name + (i % 50) * 9, 9)); + } + int64_t end1h = taosGetTimestampMs(); + int64_t end1hCt = taosHashGetCompTimes(hash1h); + + int64_t start1s = taosGetTimestampMs(); + int64_t start1sCt = taosHashGetCompTimes(hash1s); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash1s, name + (i % 500) * 9, 9)); + } + int64_t end1s = taosGetTimestampMs(); + int64_t end1sCt = taosHashGetCompTimes(hash1s); + + int64_t start10s = taosGetTimestampMs(); + int64_t start10sCt = taosHashGetCompTimes(hash10s); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash10s, name + (i % 5000) * 9, 9)); + } + int64_t end10s = taosGetTimestampMs(); + int64_t end10sCt = taosHashGetCompTimes(hash10s); + + int64_t start100s = taosGetTimestampMs(); + int64_t start100sCt = taosHashGetCompTimes(hash100s); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash100s, name + (i % 50000) * 9, 9)); + } + int64_t end100s = taosGetTimestampMs(); + int64_t end100sCt = taosHashGetCompTimes(hash100s); + + int64_t start1m = taosGetTimestampMs(); + int64_t start1mCt = taosHashGetCompTimes(hash1m); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash1m, name + (i % 500000) * 9, 9)); + } + int64_t end1m = taosGetTimestampMs(); + int64_t end1mCt = taosHashGetCompTimes(hash1m); + + int64_t start10m = taosGetTimestampMs(); + int64_t start10mCt = taosHashGetCompTimes(hash10m); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash10m, name + (i % 5000000) * 9, 9)); + } + int64_t end10m = taosGetTimestampMs(); + int64_t end10mCt = taosHashGetCompTimes(hash10m); + + int64_t start100m = taosGetTimestampMs(); + int64_t start100mCt = taosHashGetCompTimes(hash100m); + for (int64_t i = 0; i < 10000000; ++i) { 
+ ASSERT(taosHashGet(hash100m, name + (i % 50000000) * 9, 9)); + } + int64_t end100m = taosGetTimestampMs(); + int64_t end100mCt = taosHashGetCompTimes(hash100m); + + int64_t start100mS = taosGetTimestampMs(); + int64_t start100mSCt = taosHashGetCompTimes(hash100m); + _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash100m, name + (i % 50000000) * 9, 9)); + } + int64_t end100mS = taosGetTimestampMs(); + int64_t end100mSCt = taosHashGetCompTimes(hash100m); + + + printf("1h \t %" PRId64 "ms,%" PRId64 "\n", end1h - start1h, end1hCt - start1hCt); + printf("1s \t %" PRId64 "ms,%" PRId64 "\n", end1s - start1s, end1sCt - start1sCt); + printf("10s \t %" PRId64 "ms,%" PRId64 "\n", end10s - start10s, end10sCt - start10sCt); + printf("100s \t %" PRId64 "ms,%" PRId64 "\n", end100s - start100s, end100sCt - start100sCt); + printf("1m \t %" PRId64 "ms,%" PRId64 "\n", end1m - start1m, end1mCt - start1mCt); + printf("10m \t %" PRId64 "ms,%" PRId64 "\n", end10m - start10m, end10mCt - start10mCt); + printf("100m \t %" PRId64 "ms,%" PRId64 "\n", end100m - start100m, end100mCt - start100mCt); + + taosHashCleanup(hash1h); + taosHashCleanup(hash1s); + taosHashCleanup(hash10s); + taosHashCleanup(hash100s); + taosHashCleanup(hash1m); + taosHashCleanup(hash10m); + taosHashCleanup(hash100m); + + SHashObj *mhash[1000] = {0}; + for (int64_t i = 0; i < 1000; ++i) { + mhash[i] = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + } + + for (int64_t i = 0; i < 50000000; ++i) { +#if 0 + taosHashPut(mhash[i%1000], name + i * 9, 9, &i, sizeof(i)); +#else + taosHashPut(mhash[i/50000], name + i * 9, 9, &i, sizeof(i)); +#endif + } + + int64_t startMhashCt = 0; + for (int64_t i = 0; i < 1000; ++i) { + startMhashCt += taosHashGetCompTimes(mhash[i]); + } + + int64_t startMhash = taosGetTimestampMs(); +#if 0 + for (int32_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(mhash[i%1000], name + i * 9, 9)); + } +#else + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(mhash[i/50000], name + i * 9, 9)); + } +#endif + int64_t endMhash = taosGetTimestampMs(); + int64_t endMhashCt = 0; + for (int64_t i = 0; i < 1000; ++i) { + printf(" %" PRId64 , taosHashGetCompTimes(mhash[i])); + endMhashCt += taosHashGetCompTimes(mhash[i]); + } + printf("\n100m \t %" PRId64 "ms,%" PRId64 "\n", endMhash - startMhash, endMhashCt - startMhashCt); + + for (int64_t i = 0; i < 1000; ++i) { + taosHashCleanup(mhash[i]); + } +} + + } int main(int argc, char** argv) { @@ -210,4 +376,5 @@ TEST(testCase, hashTest) { noLockPerformanceTest(); multithreadsTest(); acquireRleaseTest(); + //perfTest(); } From f5040b2e73e9fbb90ecf810a0b02b52b961cfd9b Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 24 Aug 2022 18:33:09 +0800 Subject: [PATCH 35/79] feat: system table visible permission --- source/libs/parser/src/parAuthenticator.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index befc822808..7c5fa92b7a 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -108,6 +108,20 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authDelete(pCxt, (SDeleteStmt*)pStmt); case QUERY_NODE_INSERT_STMT: return authInsert(pCxt, (SInsertStmt*)pStmt); + case QUERY_NODE_SHOW_DNODES_STMT: + case QUERY_NODE_SHOW_MNODES_STMT: + case QUERY_NODE_SHOW_MODULES_STMT: + 
case QUERY_NODE_SHOW_QNODES_STMT: + case QUERY_NODE_SHOW_SNODES_STMT: + case QUERY_NODE_SHOW_BNODES_STMT: + case QUERY_NODE_SHOW_CLUSTER_STMT: + case QUERY_NODE_SHOW_LICENCES_STMT: + case QUERY_NODE_SHOW_VGROUPS_STMT: + case QUERY_NODE_SHOW_TRANSACTIONS_STMT: + case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT: + case QUERY_NODE_SHOW_VNODES_STMT: + case QUERY_NODE_SHOW_SCORES_STMT: + return !pCxt->pParseCxt->enableSysInfo ? TSDB_CODE_PAR_PERMISSION_DENIED : TSDB_CODE_SUCCESS; default: break; } From ea54266daa2800d1b6793c9e511d6272fd45ab5f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 24 Aug 2022 18:35:06 +0800 Subject: [PATCH 36/79] feat: system table visible permission --- source/libs/parser/src/parAuthenticator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 7c5fa92b7a..fc76d8ffc6 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -117,6 +117,7 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { case QUERY_NODE_SHOW_CLUSTER_STMT: case QUERY_NODE_SHOW_LICENCES_STMT: case QUERY_NODE_SHOW_VGROUPS_STMT: + case QUERY_NODE_SHOW_VARIABLES_STMT: case QUERY_NODE_SHOW_TRANSACTIONS_STMT: case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT: case QUERY_NODE_SHOW_VNODES_STMT: From b87535734fb77519b6cab6f45ee463268fed47b3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 24 Aug 2022 18:47:23 +0800 Subject: [PATCH 37/79] refactor:do some internal refactor. --- source/libs/executor/src/tlinearhash.c | 18 ++++++++++++------ source/util/src/tpagedbuf.c | 2 ++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c index ad97d79f7e..e0752840db 100644 --- a/source/libs/executor/src/tlinearhash.c +++ b/source/libs/executor/src/tlinearhash.c @@ -26,7 +26,7 @@ typedef struct SLHashBucket { int32_t size; // the number of element in this entry } SLHashBucket; -typedef struct SLHashObj { +struct SLHashObj { SDiskbasedBuf *pBuf; _hash_fn_t hashFn; SLHashBucket **pBucket; // entry list @@ -35,7 +35,7 @@ typedef struct SLHashObj { int32_t bits; // the number of bits used in hash int32_t numOfBuckets; // the number of buckets int64_t size; // the number of total items -} SLHashObj; +}; /** * the data struct for each hash node @@ -99,7 +99,7 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t int32_t newPageId = -1; SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId); if (pNewPage == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } taosArrayPush(pBucket->pPageIdList, &newPageId); @@ -138,7 +138,6 @@ static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket } setBufPageDirty(pPage, true); - pBucket->size -= 1; } @@ -229,6 +228,10 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) { int32_t pageId = -1; SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId); + if (p == NULL) { + return terrno; + } + p->num = sizeof(SFilePage); setBufPageDirty(p, true); @@ -252,7 +255,8 @@ SLHashObj* tHashInit(int32_t inMemPages, int32_t pageSize, _hash_fn_t fn, int32_ printf("tHash Init failed since %s", terrstr(terrno)); return NULL; } - int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, 0, tsTempDir); + + int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, "", tsTempDir); if (code != 0) { terrno = code; return NULL; @@ -389,7 +393,9 @@ char* 
tHashGet(SLHashObj* pHashObj, const void *key, size_t keyLen) { } SLHashBucket* pBucket = pHashObj->pBucket[bucketId]; - for (int32_t i = 0; i < taosArrayGetSize(pBucket->pPageIdList); ++i) { + int32_t num = taosArrayGetSize(pBucket->pPageIdList); + + for (int32_t i = 0; i < num; ++i) { int32_t pageId = *(int32_t*)taosArrayGet(pBucket->pPageIdList, i); SFilePage* p = getBufPage(pHashObj->pBuf, pageId); diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 0e608d0da2..ac2128dd70 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -309,6 +309,7 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) { static char* evacOneDataPage(SDiskbasedBuf* pBuf) { char* bufPage = NULL; SListNode* pn = getEldestUnrefedPage(pBuf); + terrno = 0; // all pages are referenced by user, try to allocate new space if (pn == NULL) { @@ -332,6 +333,7 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) { bufPage = flushPageToDisk(pBuf, d); } + ASSERT((bufPage != NULL) || terrno != TSDB_CODE_SUCCESS); return bufPage; } From 033b251994752a2d1bf4c37c2b5b6b481881b52e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 24 Aug 2022 18:48:03 +0800 Subject: [PATCH 38/79] test: update the unit test cases. --- source/libs/executor/test/executorTests.cpp | 1 - source/libs/executor/test/lhashTests.cpp | 55 ++++++++++++--------- source/libs/executor/test/sortTests.cpp | 5 +- 3 files changed, 33 insertions(+), 28 deletions(-) diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp index bba4b254c5..1c42163349 100644 --- a/source/libs/executor/test/executorTests.cpp +++ b/source/libs/executor/test/executorTests.cpp @@ -26,7 +26,6 @@ #include "executor.h" #include "executorimpl.h" #include "function.h" -#include "stub.h" #include "taos.h" #include "tdatablock.h" #include "tdef.h" diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp index 695552faa0..c9b75395bc 100644 --- a/source/libs/executor/test/lhashTests.cpp +++ b/source/libs/executor/test/lhashTests.cpp @@ -26,40 +26,47 @@ TEST(testCase, linear_hash_Tests) { taosSeedRand(taosGetTimestampSec()); + strcpy(tsTempDir, "/tmp/"); _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); -#if 0 - SLHashObj* pHashObj = tHashInit(256, 4096, fn, 320); - for(int32_t i = 0; i < 5000000; ++i) { + + int64_t st = taosGetTimestampUs(); + + SLHashObj* pHashObj = tHashInit(4098*4*2, 512, fn, 40); + for(int32_t i = 0; i < 1000000; ++i) { int32_t code = tHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i)); assert(code == 0); } // tHashPrint(pHashObj, LINEAR_HASH_STATIS); + int64_t et = taosGetTimestampUs(); -// for(int32_t i = 0; i < 10000; ++i) { -// char* v = tHashGet(pHashObj, &i, sizeof(i)); -// if (v != NULL) { -//// printf("find value: %d, key:%d\n", *(int32_t*) v, i); -// } else { -// printf("failed to found key:%d in hash\n", i); -// } -// } - - tHashPrint(pHashObj, LINEAR_HASH_STATIS); - tHashCleanup(pHashObj); -#endif - -#if 0 - SHashObj* pHashObj = taosHashInit(1000, fn, false, HASH_NO_LOCK); for(int32_t i = 0; i < 1000000; ++i) { - taosHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i)); + if (i == 950000) { + printf("kf\n"); + } + char* v = tHashGet(pHashObj, &i, sizeof(i)); + if (v != NULL) { +// printf("find value: %d, key:%d\n", *(int32_t*) v, i); + } else { +// printf("failed to found key:%d in hash\n", i); + } } - for(int32_t i = 0; i < 10000; ++i) { - void* v = taosHashGet(pHashObj, &i, sizeof(i)); - } - taosHashCleanup(pHashObj); 
-#endif +// tHashPrint(pHashObj, LINEAR_HASH_STATIS); + tHashCleanup(pHashObj); + int64_t et1 = taosGetTimestampUs(); + SHashObj* pHashObj1 = taosHashInit(1000, fn, false, HASH_NO_LOCK); + for(int32_t i = 0; i < 1000000; ++i) { + taosHashPut(pHashObj1, &i, sizeof(i), &i, sizeof(i)); + } + + for(int32_t i = 0; i < 1000000; ++i) { + void* v = taosHashGet(pHashObj1, &i, sizeof(i)); + } + taosHashCleanup(pHashObj1); + + int64_t et2 = taosGetTimestampUs(); + printf("linear hash time:%.2f ms, buildHash:%.2f ms, hash:%.2f\n", (et1-st)/1000.0, (et-st)/1000.0, (et2-et1)/1000.0); } \ No newline at end of file diff --git a/source/libs/executor/test/sortTests.cpp b/source/libs/executor/test/sortTests.cpp index 6e244152f2..4ac15670ac 100644 --- a/source/libs/executor/test/sortTests.cpp +++ b/source/libs/executor/test/sortTests.cpp @@ -27,7 +27,6 @@ #include "executorimpl.h" #include "executor.h" -#include "stub.h" #include "taos.h" #include "tdatablock.h" #include "tdef.h" @@ -196,7 +195,7 @@ int32_t docomp(const void* p1, const void* p2, void* param) { } } // namespace -#if 1 +#if 0 TEST(testCase, inMem_sort_Test) { SBlockOrderInfo oi = {0}; oi.order = TSDB_ORDER_ASC; @@ -382,7 +381,7 @@ TEST(testCase, ordered_merge_sort_Test) { } void* v = tsortGetValue(pTupleHandle, 0); - printf("%d: %d\n", row, *(int32_t*) v); +// printf("%d: %d\n", row, *(int32_t*) v); ASSERT_EQ(row++, *(int32_t*) v); } From eef595318d9d6dafcbdc22137d0cd2f91b91e93e Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 24 Aug 2022 19:08:50 +0800 Subject: [PATCH 39/79] feat: system table visible permission --- source/libs/parser/test/parTestUtil.cpp | 1 + source/libs/planner/test/planTestUtil.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp index 98281b7bf0..360b904c17 100644 --- a/source/libs/parser/test/parTestUtil.cpp +++ b/source/libs/parser/test/parTestUtil.cpp @@ -207,6 +207,7 @@ class ParserTestBaseImpl { pCxt->db = caseEnv_.db_.c_str(); pCxt->pUser = caseEnv_.user_.c_str(); pCxt->isSuperUser = caseEnv_.user_ == "root"; + pCxt->enableSysInfo = true; pCxt->pSql = stmtEnv_.sql_.c_str(); pCxt->sqlLen = stmtEnv_.sql_.length(); pCxt->pMsg = stmtEnv_.msgBuf_.data(); diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index 5fc8b3cf30..f904643be9 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -343,6 +343,7 @@ class PlannerTestBaseImpl { cxt.pMsg = stmtEnv_.msgBuf_.data(); cxt.msgLen = stmtEnv_.msgBuf_.max_size(); cxt.svrVer = "3.0.0.0"; + cxt.enableSysInfo = true; if (prepare) { SStmtCallback stmtCb = {0}; cxt.pStmtCb = &stmtCb; From 437d4947f6914a7c7491b5f2ae087a58c33ecc8e Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 24 Aug 2022 20:13:18 +0800 Subject: [PATCH 40/79] fix: tdb page cache hash page removing fix --- source/libs/tdb/src/db/tdbBtree.c | 2 +- source/libs/tdb/src/db/tdbPCache.c | 35 ++++++++++++++++-------------- source/libs/tdb/src/db/tdbPage.c | 3 +++ 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 7a44edb12c..5430acb972 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -1401,7 +1401,7 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD pDecoder->pgno = 0; TDB_CELLDECODER_SET_FREE_NIL(pDecoder); - tdbDebug("tdb btc decoder set nil: %p/0x%x ", pDecoder, 
pDecoder->freeKV); + // tdbTrace("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV); // 1. Decode header part if (!leaf) { diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c index ab9b21dc3f..527ad200d4 100644 --- a/source/libs/tdb/src/db/tdbPCache.c +++ b/source/libs/tdb/src/db/tdbPCache.c @@ -145,7 +145,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) // 1. Search the hash table pPage = pCache->pgHash[tdbPCachePageHash(pPgid) % pCache->nHash]; while (pPage) { - if (memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0 && pPage->pgid.pgno == pPgid->pgno) break; + if (pPage->pgid.pgno == pPgid->pgno && memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0) break; pPage = pPage->pHashNext; } @@ -243,7 +243,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable--; // printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("pin page %d", pPage->id); + tdbDebug("pin page %d", pPage->id); } } @@ -264,30 +264,33 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable++; // printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("unpin page %d", pPage->id); + tdbDebug("unpin page %d", pPage->id); } static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) { - SPage **ppPage; - uint32_t h; - - h = tdbPCachePageHash(&(pPage->pgid)); - for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext)) - ; + uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; + SPage **ppPage = &(pCache->pgHash[h]); + if (*ppPage == pPage) { + pCache->pgHash[h] = pPage->pHashNext; + } else { + for (; (*ppPage) && (*ppPage)->pHashNext != pPage; ppPage = &((*ppPage)->pHashNext)) + ; + if (*ppPage) { + (*ppPage)->pHashNext = pPage->pHashNext; + } + } if (*ppPage) { - *ppPage = pPage->pHashNext; - pCache->nPage--; + pPage->pHashNext = NULL; + --pCache->nPage; // printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); } - tdbTrace("remove page %d to hash", pPage->id); + tdbDebug("remove page %p/%d from hash", pPage, pPage->id); } static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { - int h; - - h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; + uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; pPage->pHashNext = pCache->pgHash[h]; pCache->pgHash[h] = pPage; @@ -295,7 +298,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { pCache->nPage++; // printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("add page %d to hash", pPage->id); + tdbDebug("add page %p/%d to hash", pPage, pPage->id); } static int tdbPCacheOpenImpl(SPCache *pCache) { diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c index 276b06b147..9e0cd76573 100644 --- a/source/libs/tdb/src/db/tdbPage.c +++ b/source/libs/tdb/src/db/tdbPage.c @@ -74,6 +74,7 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t) int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) { u8 *ptr; + tdbDebug("page/destroy: %p %p", pPage, xFree); ASSERT(xFree); for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) { @@ -87,6 +88,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) } void tdbPageZero(SPage *pPage, u8 
szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) { + tdbDebug("page/zero: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize); pPage->pPageHdr = pPage->pData + szAmHdr; TDB_PAGE_NCELLS_SET(pPage, 0); TDB_PAGE_CCELLS_SET(pPage, pPage->pageSize - sizeof(SPageFtr)); @@ -103,6 +105,7 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell } void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) { + tdbDebug("page/init: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize); pPage->pPageHdr = pPage->pData + szAmHdr; pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage); pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage); From f8ecbdc8057e62673ab1ce60975aee4c1865acf7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 24 Aug 2022 20:21:18 +0800 Subject: [PATCH 41/79] add err msg --- source/libs/index/src/indexFilter.c | 7 +++++++ source/libs/transport/src/thttp.c | 5 +++-- source/libs/transport/src/transSvr.c | 8 +++++--- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 21aeaba70b..75844ce76f 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -255,6 +255,13 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx if (node->opType == OP_TYPE_JSON_GET_VALUE) { return code; } + if ((node->pLeft != NULL && nodeType(node->pLeft) == QUERY_NODE_COLUMN) && + (node->pRight != NULL && nodeType(node->pRight) == QUERY_NODE_VALUE)) { + SColumnNode *cn = (SColumnNode *)(node->pLeft); + if (cn->node.resType.type == TSDB_DATA_TYPE_JSON) { + SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + } SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam)); if (NULL == paramList) { diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index 935f536a90..98e0b8f9c9 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -23,6 +23,7 @@ #define HTTP_RECV_BUF_SIZE 1024 + typedef struct SHttpClient { uv_connect_t conn; uv_tcp_t tcp; @@ -143,9 +144,9 @@ static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) { SHttpClient* cli = handle->data; if (nread < 0) { - uError("http-report read error:%s", uv_err_name(nread)); + uError("http-report recv error:%s", uv_err_name(nread)); } else { - uTrace("http-report succ to read %d bytes, just ignore it", nread); + uTrace("http-report succ to recv %d bytes, just ignore it", nread); } uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); } diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 6dd9481b95..207b967923 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -276,14 +276,16 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { while (transReadComplete(pBuf)) { tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); if (true == pBuf->invalid || false == uvHandleReq(conn)) { - tError("%s conn %p read invalid packet, dst: %s, srv: %s", transLabel(pTransInst), conn, conn->dst, conn->src); + tError("%s conn %p read invalid packet, received from %s, local info:%s", transLabel(pTransInst), conn, + conn->dst, conn->src); destroyConn(conn, true); return; } } return; } else { - tError("%s conn 
%p read invalid packet, exceed limit", transLabel(pTransInst), conn); + tError("%s conn %p read invalid packet, exceed limit, received from %s, local info:", transLabel(pTransInst), + conn, conn->dst, conn->src); destroyConn(conn, true); return; } @@ -649,7 +651,7 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) { pObj->workerIdx = (pObj->workerIdx + 1) % pObj->numOfThreads; - tTrace("new conntion accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx); + tTrace("new connection accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx); uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, uvOnPipeWriteCb); } else { From 46c3edbfd9ba8f377c401958af46190f93b4e54b Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 25 Aug 2022 08:37:11 +0800 Subject: [PATCH 42/79] Update README-CN.md --- README-CN.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README-CN.md b/README-CN.md index e30e38ae78..fccbe1a99e 100644 --- a/README-CN.md +++ b/README-CN.md @@ -303,14 +303,14 @@ Query OK, 2 row(s) in set (0.001700s) TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用: -- [Java](https://docs.taosdata.com/reference/connector/java/) -- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp) -- [Python](https://docs.taosdata.com/reference/connector/python/) -- [Go](https://docs.taosdata.com/reference/connector/go/) -- [Node.js](https://docs.taosdata.com/reference/connector/node/) -- [Rust](https://docs.taosdata.com/reference/connector/rust/) -- [C#](https://docs.taosdata.com/reference/connector/csharp/) -- [RESTful API](https://docs.taosdata.com/reference/rest-api/) +- [Java](https://docs.taosdata.com/connector/java/) +- [C/C++](https://docs.taosdata.com/connector/cpp/) +- [Python](https://docs.taosdata.com/connector/python/) +- [Go](https://docs.taosdata.com/connector/go/) +- [Node.js](https://docs.taosdata.com/connector/node/) +- [Rust](https://docs.taosdata.com/connector/rust/) +- [C#](https://docs.taosdata.com/connector/csharp/) +- [RESTful API](https://docs.taosdata.com/rest-api/) # 成为社区贡献者 From 14f1bf314d6f0bf1a7ea996bdc6800999a369dba Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 25 Aug 2022 08:38:10 +0800 Subject: [PATCH 43/79] Update README-CN.md --- README-CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-CN.md b/README-CN.md index fccbe1a99e..0b7e42d4fa 100644 --- a/README-CN.md +++ b/README-CN.md @@ -310,7 +310,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java - [Node.js](https://docs.taosdata.com/connector/node/) - [Rust](https://docs.taosdata.com/connector/rust/) - [C#](https://docs.taosdata.com/connector/csharp/) -- [RESTful API](https://docs.taosdata.com/rest-api/) +- [RESTful API](https://docs.taosdata.com/connector/rest-api/) # 成为社区贡献者 From dd479baded5329e1af72cb0c8b16cda220f124fa Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 25 Aug 2022 08:40:45 +0800 Subject: [PATCH 44/79] Update README.md --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 57b184682e..611d97aac9 100644 --- a/README.md +++ b/README.md @@ -35,13 +35,13 @@ TDengine is an open source, high-performance, cloud native [time-series database # Documentation -For user manual, system design and architecture, please refer to 
[TDengine Documentation](https://docs.taosdata.com) ([TDengine 文档](https://docs.taosdata.com)) +For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com)) # Building At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. -You can choose to install through source code, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubernetes](https://docs.taosdata.com/deployment/k8s/). This quick guide only applies to installing from source. +You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source. TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. @@ -306,14 +306,14 @@ Query OK, 2 row(s) in set (0.001700s) TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. -- [Java](https://docs.taosdata.com/reference/connector/java/) -- [C/C++](https://docs.taosdata.com/reference/connector/cpp/) -- [Python](https://docs.taosdata.com/reference/connector/python/) -- [Go](https://docs.taosdata.com/reference/connector/go/) -- [Node.js](https://docs.taosdata.com/reference/connector/node/) -- [Rust](https://docs.taosdata.com/reference/connector/rust/) -- [C#](https://docs.taosdata.com/reference/connector/csharp/) -- [RESTful API](https://docs.taosdata.com/reference/rest-api/) +- [Java](https://docs.tdengine.com/reference/connector/java/) +- [C/C++](https://docs.tdengine.com/reference/connector/cpp/) +- [Python](https://docs.tdengine.com/reference/connector/python/) +- [Go](https://docs.tdengine.com/reference/connector/go/) +- [Node.js](https://docs.tdengine.com/reference/connector/node/) +- [Rust](https://docs.tdengine.com/reference/connector/rust/) +- [C#](https://docs.tdengine.com/reference/connector/csharp/) +- [RESTful API](https://docs.tdengine.com/reference/rest-api/) # Contribute to TDengine From dc6baa13a62e87095275116a31cedbb5128bb53e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 25 Aug 2022 09:07:44 +0800 Subject: [PATCH 45/79] fix: fill if window pseudo column --- source/libs/executor/src/tfill.c | 56 +++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 6d25a49847..f23552c5a7 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -36,6 +36,7 @@ #define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId) static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); +static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex); static void setNullRow(SSDataBlock* 
pBlock, SFillInfo* pFillInfo, int32_t rowIndex) { for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { @@ -43,9 +44,8 @@ static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowInd int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId); if (pCol->notFillCol) { - if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfo, rowIndex); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfo, rowIndex, pKey); @@ -76,8 +76,32 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32 } } -static bool fillWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) { - //fill windows pseduo column, _wstart, _wend, _wduration and return true +//fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false +static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) { + if (!pCol->notFillCol) { + return false; + } + if (pCol->pExpr->pExpr->nodeType == QUERY_NODE_COLUMN) { + if (pCol->pExpr->base.numOfParams != 1) { + return false; + } + if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) { + colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->currentKey, false); + return true; + } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_END) { + //TODO: include endpoint + SInterval* pInterval = &pFillInfo->interval; + int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 
1 : -1; + int64_t windowEnd = + taosTimeAdd(pFillInfo->currentKey, pInterval->sliding * step, pInterval->slidingUnit, pInterval->precision); + colDataAppend(pDstColInfoData, rowIndex, (const char*)&windowEnd, false); + return true; + } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_DURATION) { + //TODO: include endpoint + colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->interval.sliding, false); + return true; + } + } return false; } @@ -97,10 +121,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* SFillColInfo* pCol = &pFillInfo->pFillCol[i]; SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); - - if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index); + if (!filled) { SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfoData, index, pKey); } @@ -111,10 +133,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); - - if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index); + if (!filled) { SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfoData, index, pKey); } @@ -132,9 +152,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* int16_t type = pDstCol->info.type; if (pCol->notFillCol) { - if (type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstCol, index); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstCol, index, pKey); @@ -175,9 +194,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId); if (pCol->notFillCol) { - if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDst, index); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
         SGroupKeys* pKey = taosArrayGet(p, i);
         doSetVal(pDst, index, pKey);
From 0eabbd238b907bbf5c091e08e5857b596726fdfa Mon Sep 17 00:00:00 2001
From: wade zhang <95411902+gccgdb1234@users.noreply.github.com>
Date: Thu, 25 Aug 2022 09:29:53 +0800
Subject: [PATCH 46/79] Update 24-show.md

---
 docs/zh/12-taos-sql/24-show.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md
index 781f94324c..75efd5f514 100644
--- a/docs/zh/12-taos-sql/24-show.md
+++ b/docs/zh/12-taos-sql/24-show.md
@@ -3,7 +3,7 @@ sidebar_label: SHOW 命令
 title: 使用 SHOW 命令查看系统元数据
 ---
 
-除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。
+SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。
 
 ## SHOW ACCOUNTS
 
From e971af427c837d22bf9344362d2c2aecdacd12cb Mon Sep 17 00:00:00 2001
From: wade zhang <95411902+gccgdb1234@users.noreply.github.com>
Date: Thu, 25 Aug 2022 09:32:16 +0800
Subject: [PATCH 47/79] Update 24-show.md

---
 docs/en/12-taos-sql/24-show.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md
index 96503c9598..6b56161322 100644
--- a/docs/en/12-taos-sql/24-show.md
+++ b/docs/en/12-taos-sql/24-show.md
@@ -3,7 +3,7 @@ sidebar_label: SHOW Statement
 title: SHOW Statement for Metadata
 ---
 
-In addition to running SELECT statements on INFORMATION_SCHEMA, you can also use SHOW to obtain system metadata, information, and status.
+The `SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
 
 ## SHOW ACCOUNTS
 
From b49d4ed414d33d4e182baebdf3f9d7a809a4ee70 Mon Sep 17 00:00:00 2001
From: dapan1121
Date: Thu, 25 Aug 2022 10:40:15 +0800
Subject: [PATCH 48/79] fix: remove debug info

---
 source/libs/scheduler/src/schRemote.c |  2 +-
 source/util/test/hashTest.cpp         | 41 +++++++++++++++++++++++----
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index fc2a8d1e08..5a64aaaebb 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -1117,7 +1117,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
       break;
   }
 
-#if 0
+#if 1
   SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
   code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? 
&rpcCtx : NULL)); msg = NULL; diff --git a/source/util/test/hashTest.cpp b/source/util/test/hashTest.cpp index 5a426f9317..135db8192a 100644 --- a/source/util/test/hashTest.cpp +++ b/source/util/test/hashTest.cpp @@ -295,15 +295,44 @@ void perfTest() { int64_t end100m = taosGetTimestampMs(); int64_t end100mCt = taosHashGetCompTimes(hash100m); + + SArray *sArray[1000] = {0}; + for (int64_t i = 0; i < 1000; ++i) { + sArray[i] = taosArrayInit(100000, 9); + } + int64_t cap = 4; + while (cap < 100000000) cap = (cap << 1u); + + _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + int32_t slotR = cap / 1000 + 1; + for (int64_t i = 0; i < 10000000; ++i) { + char* p = name + (i % 50000000) * 9; + uint32_t v = (*hashFp)(p, 9); + taosArrayPush(sArray[(v%cap)/slotR], p); + } + SArray *slArray = taosArrayInit(100000000, 9); + for (int64_t i = 0; i < 1000; ++i) { + int32_t num = taosArrayGetSize(sArray[i]); + printf("%d ", num); + SArray* pArray = sArray[i]; + for (int64_t m = 0; m < num; ++m) { + char* p = (char*)taosArrayGet(pArray, m); + ASSERT(taosArrayPush(slArray, p)); + } + } + printf("\n"); int64_t start100mS = taosGetTimestampMs(); int64_t start100mSCt = taosHashGetCompTimes(hash100m); - _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - for (int64_t i = 0; i < 10000000; ++i) { - ASSERT(taosHashGet(hash100m, name + (i % 50000000) * 9, 9)); + int32_t num = taosArrayGetSize(slArray); + for (int64_t i = 0; i < num; ++i) { + ASSERT(taosHashGet(hash100m, (char*)TARRAY_GET_ELEM(slArray, i), 9)); } int64_t end100mS = taosGetTimestampMs(); int64_t end100mSCt = taosHashGetCompTimes(hash100m); - + for (int64_t i = 0; i < 1000; ++i) { + taosArrayDestroy(sArray[i]); + } + taosArrayDestroy(slArray); printf("1h \t %" PRId64 "ms,%" PRId64 "\n", end1h - start1h, end1hCt - start1hCt); printf("1s \t %" PRId64 "ms,%" PRId64 "\n", end1s - start1s, end1sCt - start1sCt); @@ -312,6 +341,7 @@ void perfTest() { printf("1m \t %" PRId64 "ms,%" PRId64 "\n", end1m - start1m, end1mCt - start1mCt); printf("10m \t %" PRId64 "ms,%" PRId64 "\n", end10m - start10m, end10mCt - start10mCt); printf("100m \t %" PRId64 "ms,%" PRId64 "\n", end100m - start100m, end100mCt - start100mCt); + printf("100mS \t %" PRId64 "ms,%" PRId64 "\n", end100mS - start100mS, end100mSCt - start100mSCt); taosHashCleanup(hash1h); taosHashCleanup(hash1s); @@ -345,7 +375,8 @@ void perfTest() { ASSERT(taosHashGet(mhash[i%1000], name + i * 9, 9)); } #else - for (int64_t i = 0; i < 10000000; ++i) { +// for (int64_t i = 0; i < 10000000; ++i) { + for (int64_t i = 0; i < 50000000; i+=5) { ASSERT(taosHashGet(mhash[i/50000], name + i * 9, 9)); } #endif From c72c972c5f2b4ffa9c7726f398a393823036e9bf Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 25 Aug 2022 10:55:14 +0800 Subject: [PATCH 49/79] fix(query): fix multiple interp used result number of rows incorrect TD-18617 --- source/libs/executor/src/timewindowoperator.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 551180f639..6dc91255d9 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2112,6 +2112,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp // todo set the correct primary timestamp column // output the result + bool hasInterp = true; for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) { SExprInfo* pExprInfo = 
&pExprSup->pExprInfo[j]; int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId; @@ -2123,7 +2124,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp switch (pSliceInfo->fillType) { case TSDB_FILL_NULL: { colDataAppendNULL(pDst, rows); - pResBlock->info.rows += 1; break; } @@ -2143,7 +2143,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); colDataAppend(pDst, rows, (char*)&v, false); } - pResBlock->info.rows += 1; break; } @@ -2157,6 +2156,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp // before interp range, do not fill if (start.key == INT64_MIN || end.key == INT64_MAX) { + hasInterp = false; break; } @@ -2168,28 +2168,27 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp } taosMemoryFree(current.val); - pResBlock->info.rows += 1; break; } case TSDB_FILL_PREV: { if (!pSliceInfo->isPrevRowSet) { + hasInterp = false; break; } SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot); colDataAppend(pDst, rows, pkey->pData, false); - pResBlock->info.rows += 1; break; } case TSDB_FILL_NEXT: { if (!pSliceInfo->isNextRowSet) { + hasInterp = false; break; } SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot); colDataAppend(pDst, rows, pkey->pData, false); - pResBlock->info.rows += 1; break; } @@ -2198,6 +2197,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } } + + if (hasInterp) { + pResBlock->info.rows += 1; + } + } static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) { @@ -2378,6 +2382,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot); SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot); + if (colDataIsNull_s(pSrc, i)) { + colDataAppendNULL(pDst, pResBlock->info.rows); + continue; + } + char* v = colDataGetData(pSrc, i); colDataAppend(pDst, pResBlock->info.rows, v, false); } From 909529bb7bf69de94a2d9740c7a6da9677051f4a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 25 Aug 2022 10:55:28 +0800 Subject: [PATCH 50/79] fix(query): check return value and do some internal refactor. 
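
In short (summarizing the diff below): the operator close callback no longer
receives the output column count,

    typedef void (*__optr_close_fn_t)(void* param);  /* previously: (void* param, int32_t num) */

and the destroyXxxOperatorInfo() implementations are updated to the new
signature. In addition, constructors such as createGroupOperatorInfo(),
createIndefinitOutputOperatorInfo() and createIntervalOperatorInfo() now check
the return codes of initAggInfo()/appendDownstream() and release partially
initialized state through the matching destroy function on their _error path.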
--- source/libs/executor/inc/executorimpl.h | 2 +- source/libs/executor/src/cachescanoperator.c | 5 +- source/libs/executor/src/executorimpl.c | 22 ++-- source/libs/executor/src/groupoperator.c | 20 +++- source/libs/executor/src/joinoperator.c | 5 +- source/libs/executor/src/projectoperator.c | 22 ++-- source/libs/executor/src/scanoperator.c | 16 ++- source/libs/executor/src/sortoperator.c | 8 +- source/libs/executor/src/timewindowoperator.c | 103 +++++++++++------- 9 files changed, 119 insertions(+), 84 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index fb4eac991f..73f7781c04 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -122,7 +122,7 @@ typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* res typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr); typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr); -typedef void (*__optr_close_fn_t)(void* param, int32_t num); +typedef void (*__optr_close_fn_t)(void* param); typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); typedef struct STaskIdInfo { diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 94e4384b30..b31fa279e5 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -24,10 +24,9 @@ #include "tcompare.h" #include "thash.h" #include "ttypes.h" -#include "executorInt.h" static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator); -static void destroyLastrowScanOperator(void* param, int32_t numOfOutput); +static void destroyLastrowScanOperator(void* param); static int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds); SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) { @@ -211,7 +210,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { } } -void destroyLastrowScanOperator(void* param, int32_t numOfOutput) { +void destroyLastrowScanOperator(void* param) { SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param; blockDataDestroy(pInfo->pRes); taosMemoryFreeClear(param); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 6aaa2eb0c7..0b2b7d0220 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -90,13 +90,13 @@ static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* static void releaseQueryBuf(size_t numOfTables); -static void destroyFillOperatorInfo(void* param, int32_t numOfOutput); -static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); -static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); -static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); +static void destroyFillOperatorInfo(void* param); +static void destroyProjectOperatorInfo(void* param); +static void destroyOrderOperatorInfo(void* param); +static void destroyAggOperatorInfo(void* param); -static void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput); -static void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput); +static void destroyIntervalOperatorInfo(void* param); +static void destroyExchangeOperatorInfo(void* param); static void destroyOperatorInfo(SOperatorInfo* pOperator); @@ -3424,7 +3424,7 @@ static void 
destroyOperatorInfo(SOperatorInfo* pOperator) { } if (pOperator->fpSet.closeFn != NULL) { - pOperator->fpSet.closeFn(pOperator->info, pOperator->exprSupp.numOfExprs); + pOperator->fpSet.closeFn(pOperator->info); } if (pOperator->pDownstream != NULL) { @@ -3616,7 +3616,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* return pOperator; _error: - destroyAggOperatorInfo(pInfo, numOfCols); + destroyAggOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; return NULL; @@ -3641,7 +3641,7 @@ static void freeItem(void* pItem) { } } -void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { +void destroyAggOperatorInfo(void* param) { SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); @@ -3651,7 +3651,7 @@ void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -void destroyFillOperatorInfo(void* param, int32_t numOfOutput) { +void destroyFillOperatorInfo(void* param) { SFillOperatorInfo* pInfo = (SFillOperatorInfo*)param; pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo); pInfo->pRes = blockDataDestroy(pInfo->pRes); @@ -3667,7 +3667,7 @@ void destroyFillOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput) { +void destroyExchangeOperatorInfo(void* param) { SExchangeInfo* pExInfo = (SExchangeInfo*)param; taosRemoveRef(exchangeObjRefPool, pExInfo->self); } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index ab2326ecae..53709c7dcc 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -36,8 +36,12 @@ static void freeGroupKey(void* param) { taosMemoryFree(pKey->pData); } -static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyGroupOperatorInfo(void* param) { SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param; + if (pInfo == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->keyBuf); taosArrayDestroy(pInfo->pGroupCols); @@ -413,7 +417,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx } initResultSizeInfo(&pOperator->resultInfo, 4096); - initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str); + code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResultBlock); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -426,11 +434,15 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL, destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + return pOperator; _error: pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFreeClear(pInfo); + destroyGroupOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); return NULL; } @@ -710,7 +722,7 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { return buildPartitionResult(pOperator); } -static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyPartitionOperatorInfo(void* param) { SPartitionOperatorInfo* 
pInfo = (SPartitionOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); taosArrayDestroy(pInfo->pGroupCols); diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 7d2b84d0f0..1bc7d458e0 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -25,7 +25,7 @@ static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode); static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator); -static void destroyMergeJoinOperator(void* param, int32_t numOfOutput); +static void destroyMergeJoinOperator(void* param); static void extractTimeCondition(SJoinOperatorInfo* pInfo, SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode); @@ -128,12 +128,11 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) { pColumn->scale = pColumnNode->node.resType.scale; } -void destroyMergeJoinOperator(void* param, int32_t numOfOutput) { +void destroyMergeJoinOperator(void* param) { SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param; nodesDestroyNode(pJoinOperator->pCondAfterMerge); pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes); - taosMemoryFreeClear(param); } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index b0ca219d52..0661ccd390 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -23,7 +23,7 @@ static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOf static void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, int32_t numOfExprs); -static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyProjectOperatorInfo(void* param) { if (NULL == param) { return; } @@ -37,10 +37,13 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyIndefinitOperatorInfo(void* param) { SIndefOperatorInfo* pInfo = (SIndefOperatorInfo*)param; - cleanupBasicInfo(&pInfo->binfo); + if (pInfo == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); taosArrayDestroy(pInfo->pPseudoColInfo); cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSup); @@ -112,7 +115,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys return pOperator; _error: - destroyProjectOperatorInfo(pInfo, numOfCols); + destroyProjectOperatorInfo(pInfo); taosMemoryFree(pOperator); pTaskInfo->code = code; return NULL; @@ -371,9 +374,12 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy initResultSizeInfo(&pOperator->resultInfo, numOfRows); - initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&pInfo->binfo, pResBlock); + int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfExpr); pInfo->binfo.pRes = pResBlock; @@ -389,7 +395,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doApplyIndefinitFunction, NULL, NULL, destroyIndefinitOperatorInfo, NULL, NULL, NULL); - 
int32_t code = appendDownstream(pOperator, &downstream, 1); + code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -397,7 +403,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy return pOperator; _error: - taosMemoryFree(pInfo); + destroyIndefinitOperatorInfo(pInfo); taosMemoryFree(pOperator); pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; return NULL; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ec902588e3..c9b8d5a377 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -689,7 +689,7 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr return 0; } -static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyTableScanOperatorInfo(void* param) { STableScanInfo* pTableScanInfo = (STableScanInfo*)param; blockDataDestroy(pTableScanInfo->pResBlock); cleanupQueryTableDataCond(&pTableScanInfo->cond); @@ -863,7 +863,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { return pBlock; } -static void destroyBlockDistScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyBlockDistScanOperatorInfo(void* param) { SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param; blockDataDestroy(pDistInfo->pResBlock); tsdbReaderClose(pDistInfo->pHandle); @@ -1532,11 +1532,11 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNo return NULL; } -static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyStreamScanOperatorInfo(void* param) { SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param; if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) { STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info; - destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput); + destroyTableScanOperatorInfo(pTableScanInfo); taosMemoryFreeClear(pStreamScan->pTableScanOp); } if (pStreamScan->tqReader) { @@ -1692,7 +1692,7 @@ _error: return NULL; } -static void destroySysScanOperator(void* param, int32_t numOfOutput) { +static void destroySysScanOperator(void* param) { SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param; tsem_destroy(&pInfo->ready); blockDataDestroy(pInfo->pRes); @@ -2577,12 +2577,10 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; } -static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyTagScanOperatorInfo(void* param) { STagScanInfo* pInfo = (STagScanInfo*)param; pInfo->pRes = blockDataDestroy(pInfo->pRes); - taosArrayDestroy(pInfo->pColMatchInfo); - taosMemoryFreeClear(param); } @@ -3044,7 +3042,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { return pBlock; } -void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) { +void destroyTableMergeScanOperatorInfo(void* param) { STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param; cleanupQueryTableDataCond(&pTableScanInfo->cond); taosArrayDestroy(pTableScanInfo->sortSourceParams); diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index dbaba98914..e2014ec973 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -20,7 +20,7 @@ static SSDataBlock* doSort(SOperatorInfo* pOperator); static int32_t doOpenSortOperator(SOperatorInfo* pOperator); static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); -static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOrderOperatorInfo(void* param); // todo add limit/offset impl SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo) { @@ -250,7 +250,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { return blockDataGetNumOfRows(pBlock) > 0 ? pBlock : NULL; } -void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { +void destroyOrderOperatorInfo(void* param) { SSortOperatorInfo* pInfo = (SSortOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); @@ -468,7 +468,7 @@ int32_t getGroupSortExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, u return TSDB_CODE_SUCCESS; } -void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) { +void destroyGroupSortOperatorInfo(void* param) { SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); @@ -685,7 +685,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) { return pBlock; } -void destroyMultiwayMergeOperatorInfo(void* param, int32_t numOfOutput) { +void destroyMultiwayMergeOperatorInfo(void* param) { SMultiwayMergeOperatorInfo* pInfo = (SMultiwayMergeOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 3769c57bf3..1ef191679e 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1664,7 +1664,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { return pInfo->binfo.pRes->info.rows == 0 ? 
NULL : pInfo->binfo.pRes; } -static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyStateWindowOperatorInfo(void* param) { SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->stateKey.pData); @@ -1677,7 +1677,7 @@ static void freeItem(void* param) { taosMemoryFree(pKey->pData); } -void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyIntervalOperatorInfo(void* param) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); @@ -1694,7 +1694,7 @@ void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyStreamFinalIntervalOperatorInfo(void* param) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); @@ -1711,7 +1711,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { int32_t size = taosArrayGetSize(pInfo->pChildren); for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i); - destroyStreamFinalIntervalOperatorInfo(pChildOp->info, numOfOutput); + destroyStreamFinalIntervalOperatorInfo(pChildOp->info); taosMemoryFree(pChildOp->pDownstream); cleanupExprSupp(&pChildOp->exprSupp); taosMemoryFreeClear(pChildOp); @@ -1830,6 +1830,10 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* initResultSizeInfo(&pOperator->resultInfo, 4096); int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); if (isStream) { @@ -1849,6 +1853,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* goto _error; } } + pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); pInfo->delIndex = 0; @@ -1878,7 +1883,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* return pOperator; _error: - destroyIntervalOperatorInfo(pInfo, numOfCols); + destroyIntervalOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -2563,7 +2568,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { return pResBlock->info.rows == 0 ? 
NULL : pResBlock; } -void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) { +void destroyTimeSliceOperatorInfo(void* param) { STimeSliceOperatorInfo* pInfo = (STimeSliceOperatorInfo*)param; pInfo->pRes = blockDataDestroy(pInfo->pRes); @@ -2671,7 +2676,11 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); - initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str); + int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -2692,18 +2701,27 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); - int32_t code = appendDownstream(pOperator, &downstream, 1); + code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + return pOperator; _error: - pTaskInfo->code = TSDB_CODE_SUCCESS; + destroyStateWindowOperatorInfo(pInfo); + taosMemoryFreeClear(pOperator); + pTaskInfo->code = code; return NULL; } -void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) { +void destroySWindowOperatorInfo(void* param) { SSessionAggOperatorInfo* pInfo = (SSessionAggOperatorInfo*)param; - cleanupBasicInfo(&pInfo->binfo); + if (pInfo == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); cleanupAggSup(&pInfo->aggSup); @@ -2757,15 +2775,15 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, destroySWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); pOperator->pTaskInfo = pTaskInfo; - code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + return pOperator; _error: - if (pInfo != NULL) { - destroySWindowOperatorInfo(pInfo, numOfCols); - } - + destroySWindowOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -3328,14 +3346,16 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); ASSERT(numOfCols > 0); increaseTs(pOperator->exprSupp.pCtx); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + initResultRowInfo(&pInfo->binfo.resultRowInfo); pInfo->pChildren = NULL; if (numOfChild > 0) { @@ -3401,7 +3421,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, return pOperator; _error: - destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols); + destroyStreamFinalIntervalOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -3439,7 +3459,7 @@ void destroyStateStreamAggSupporter(SStreamAggSupporter* pSup) { 
blockDataDestroy(pSup->pScanBlock); } -void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) { +void destroyStreamSessionAggOperatorInfo(void* param) { SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); destroyStreamAggSupporter(&pInfo->streamAggSup); @@ -3449,7 +3469,7 @@ void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) { for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i); SStreamSessionAggOperatorInfo* pChInfo = pChild->info; - destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput); + destroyStreamSessionAggOperatorInfo(pChInfo); taosMemoryFreeClear(pChild); } } @@ -3519,7 +3539,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh if (pSessionNode->window.pExprs != NULL) { int32_t numOfScalar = 0; SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar); - int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -3583,7 +3603,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh _error: if (pInfo != NULL) { - destroyStreamSessionAggOperatorInfo(pInfo, numOfCols); + destroyStreamSessionAggOperatorInfo(pInfo); } taosMemoryFreeClear(pOperator); @@ -4411,7 +4431,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream _error: if (pInfo != NULL) { - destroyStreamSessionAggOperatorInfo(pInfo, pOperator->exprSupp.numOfExprs); + destroyStreamSessionAggOperatorInfo(pInfo); } taosMemoryFreeClear(pOperator); @@ -4419,7 +4439,7 @@ _error: return NULL; } -void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) { +void destroyStreamStateOperatorInfo(void* param) { SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); destroyStateStreamAggSupporter(&pInfo->streamAggSup); @@ -4429,7 +4449,7 @@ void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) { for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i); SStreamSessionAggOperatorInfo* pChInfo = pChild->info; - destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput); + destroyStreamSessionAggOperatorInfo(pChInfo); taosMemoryFreeClear(pChild); taosMemoryFreeClear(pChInfo); } @@ -4849,16 +4869,15 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys return pOperator; _error: - destroyStreamStateOperatorInfo(pInfo, numOfCols); + destroyStreamStateOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; } -void destroyMergeAlignedIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyMergeAlignedIntervalOperatorInfo(void* param) { SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param; - destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo, numOfOutput); - + destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo); taosMemoryFreeClear(param); } @@ -5086,8 +5105,11 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, int32_t code = initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&iaInfo->binfo, pResBlock); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + 
initBasicInfo(&iaInfo->binfo, pResBlock); initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win); iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, iaInfo); @@ -5095,10 +5117,6 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition)); } - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - initResultRowInfo(&iaInfo->binfo.resultRowInfo); blockDataEnsureCapacity(iaInfo->binfo.pRes, pOperator->resultInfo.capacity); @@ -5122,7 +5140,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, return pOperator; _error: - destroyMergeAlignedIntervalOperatorInfo(miaInfo, numOfCols); + destroyMergeAlignedIntervalOperatorInfo(miaInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -5145,10 +5163,10 @@ typedef struct SGroupTimeWindow { STimeWindow window; } SGroupTimeWindow; -void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyMergeIntervalOperatorInfo(void* param) { SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param; tdListFree(miaInfo->groupIntervals); - destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput); + destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo); taosMemoryFreeClear(param); } @@ -5392,8 +5410,11 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI initResultSizeInfo(&pOperator->resultInfo, 4096); int32_t code = initAggInfo(pExprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&iaInfo->binfo, pResBlock); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&iaInfo->binfo, pResBlock); initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win); iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, numOfCols, iaInfo); @@ -5426,7 +5447,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI return pOperator; _error: - destroyMergeIntervalOperatorInfo(miaInfo, numOfCols); + destroyMergeIntervalOperatorInfo(miaInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; From c254546e3b12182a821e91c0854d0e66cb4847b0 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 25 Aug 2022 10:56:29 +0800 Subject: [PATCH 51/79] add test cases --- tests/system-test/2-query/interp.py | 35 ++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 934ba9e161..0fe86e44eb 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -551,7 +551,40 @@ class TDTestCase: tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) - tdLog.printNoPrefix("==========step9:test error cases") + tdLog.printNoPrefix("==========step9:test multi-interp cases") + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(null)") + tdSql.checkRows(3) + tdSql.checkCols(4) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 15) + tdSql.checkData(2, 0, None) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(value, 1)") + tdSql.checkRows(3) + tdSql.checkCols(4) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 15) + tdSql.checkData(2, 
0, 1) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(prev)") + tdSql.checkRows(3) + tdSql.checkCols(4) + tdSql.checkData(0, 0, 5) + tdSql.checkData(1, 0, 15) + tdSql.checkData(2, 0, 15) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(next)") + tdSql.checkRows(2) + tdSql.checkCols(4) + tdSql.checkData(0, 0, 15) + tdSql.checkData(1, 0, 15) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(linear)") + tdSql.checkRows(1) + tdSql.checkCols(4) + tdSql.checkData(0, 0, 15) + + tdLog.printNoPrefix("==========step10:test error cases") tdSql.error(f"select interp(c0) from {dbname}.{tbname}") tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')") From 1c062060008959256327cea3fdba9bd7a9b5d38c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 25 Aug 2022 11:01:36 +0800 Subject: [PATCH 52/79] other:merge 3.0 --- CONTRIBUTING-CN.md | 58 ++ Jenkinsfile2 | 61 ++ README-CN.md | 4 +- README.md | 27 +- docs/zh/12-taos-sql/14-stream.md | 135 ---- examples/JDBC/JDBCDemo/pom.xml | 2 +- examples/JDBC/SpringJdbcTemplate/pom.xml | 2 +- examples/JDBC/SpringJdbcTemplate/readme.md | 4 +- .../taosdata/example/jdbcTemplate/App.java | 2 +- .../jdbcTemplate/BatcherInsertTest.java | 2 +- examples/JDBC/connectionPools/README-cn.md | 6 +- examples/JDBC/connectionPools/pom.xml | 2 +- examples/JDBC/mybatisplus-demo/pom.xml | 2 +- examples/JDBC/mybatisplus-demo/readme | 14 + .../mybatisplusdemo/mapper/WeatherMapper.java | 10 + .../src/main/resources/application.yml | 2 +- .../mapper/TemperatureMapperTest.java | 18 +- .../mapper/WeatherMapperTest.java | 31 +- examples/JDBC/readme.md | 2 +- examples/JDBC/springbootdemo/pom.xml | 2 +- examples/JDBC/springbootdemo/readme.md | 3 +- .../controller/WeatherController.java | 1 - .../springbootdemo/dao/WeatherMapper.xml | 3 +- .../src/main/resources/application.properties | 2 +- examples/JDBC/taosdemo/pom.xml | 2 +- examples/JDBC/taosdemo/readme.md | 4 +- .../taosdemo/TaosDemoApplication.java | 23 +- .../taosdemo/service/QueryService.java | 6 - .../taosdata/taosdemo/utils/SqlSpeller.java | 57 +- .../src/main/resources/application.properties | 4 +- .../taosdemo/service/TableServiceTest.java | 31 - examples/c/stream_demo.c | 8 +- include/common/tcommon.h | 26 +- include/common/tglobal.h | 2 + include/common/tmsg.h | 25 - include/common/tmsgdef.h | 3 +- include/libs/executor/executor.h | 9 +- include/libs/function/function.h | 1 + include/libs/nodes/querynodes.h | 5 +- include/libs/stream/tstream.h | 45 +- include/util/taoserror.h | 8 +- include/util/tdef.h | 2 +- include/util/tqueue.h | 1 + packaging/release.bat | 18 +- source/client/inc/clientInt.h | 7 +- source/client/inc/clientLog.h | 1 + source/client/src/clientEnv.c | 17 +- source/client/src/clientImpl.c | 5 + source/client/src/clientMain.c | 12 + source/common/src/systable.c | 3 +- source/common/src/tglobal.c | 11 +- source/dnode/mnode/impl/inc/mndCluster.h | 1 + source/dnode/mnode/impl/inc/mndDef.h | 1 + source/dnode/mnode/impl/src/mndCluster.c | 113 +++- source/dnode/mnode/impl/src/mndMain.c | 22 +- source/dnode/mnode/impl/src/mndStb.c | 2 + source/dnode/mnode/impl/src/mndSync.c | 10 +- source/dnode/mnode/impl/src/mndTelem.c | 4 +- 
source/dnode/mnode/impl/src/mndTrans.c | 2 +- .../dnode/mnode/impl/test/sma/CMakeLists.txt | 10 +- .../dnode/mnode/impl/test/stb/CMakeLists.txt | 10 +- source/dnode/vnode/src/inc/sma.h | 27 +- source/dnode/vnode/src/inc/vnodeInt.h | 2 - source/dnode/vnode/src/sma/smaCommit.c | 79 ++- source/dnode/vnode/src/sma/smaEnv.c | 120 +++- source/dnode/vnode/src/sma/smaRollup.c | 590 +++++++----------- source/dnode/vnode/src/sma/smaUtil.c | 3 + source/dnode/vnode/src/tq/tq.c | 15 + source/dnode/vnode/src/tsdb/tsdbCache.c | 18 +- source/dnode/vnode/src/vnd/vnodeCommit.c | 27 +- source/dnode/vnode/src/vnd/vnodeSvr.c | 8 +- source/libs/catalog/src/catalog.c | 2 +- source/libs/command/inc/commandInt.h | 1 - source/libs/command/src/explain.c | 13 +- source/libs/executor/inc/executil.h | 1 + source/libs/executor/inc/executorimpl.h | 3 +- source/libs/executor/src/executil.c | 359 +++++++++-- source/libs/executor/src/executor.c | 3 + source/libs/executor/src/executorimpl.c | 92 ++- source/libs/executor/src/scanoperator.c | 52 +- source/libs/executor/src/tfill.c | 59 +- source/libs/executor/src/timewindowoperator.c | 228 ++++--- source/libs/function/src/builtinsimpl.c | 15 +- source/libs/index/src/indexFilter.c | 7 + source/libs/nodes/src/nodesUtilFuncs.c | 1 + source/libs/parser/src/parInsert.c | 46 +- source/libs/parser/src/parTranslater.c | 23 +- source/libs/parser/src/parUtil.c | 10 + source/libs/planner/src/planLogicCreater.c | 12 +- source/libs/qworker/src/qworker.c | 7 +- source/libs/scheduler/inc/schInt.h | 5 +- source/libs/scheduler/src/schTask.c | 38 +- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamExec.c | 1 - source/libs/stream/src/streamMeta.c | 21 +- source/libs/stream/src/streamRecover.c | 5 +- source/libs/stream/src/streamState.c | 205 ++++++ source/libs/stream/src/streamTask.c | 3 + source/libs/sync/inc/syncSnapshot.h | 25 +- source/libs/sync/src/syncMain.c | 5 + source/libs/sync/src/syncRaftCfg.c | 2 +- source/libs/sync/src/syncSnapshot.c | 13 +- source/libs/tdb/src/db/tdbBtree.c | 2 +- source/libs/tdb/src/db/tdbPCache.c | 35 +- source/libs/tdb/src/db/tdbPage.c | 3 + source/libs/transport/src/thttp.c | 7 +- source/libs/transport/src/transSvr.c | 8 +- source/libs/wal/src/walMeta.c | 3 +- source/os/src/osFile.c | 15 +- source/os/src/osSysinfo.c | 7 +- source/util/src/tcache.c | 2 +- source/util/src/tcompression.c | 4 +- source/util/src/tqueue.c | 3 +- 113 files changed, 1957 insertions(+), 1186 deletions(-) create mode 100644 CONTRIBUTING-CN.md delete mode 100644 docs/zh/12-taos-sql/14-stream.md create mode 100644 examples/JDBC/mybatisplus-demo/readme delete mode 100644 examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java create mode 100644 source/libs/stream/src/streamState.c diff --git a/CONTRIBUTING-CN.md b/CONTRIBUTING-CN.md new file mode 100644 index 0000000000..19f3000d45 --- /dev/null +++ b/CONTRIBUTING-CN.md @@ -0,0 +1,58 @@ +# 贡献指南 + +我们感谢所有开发者提交贡献。随时关注我们,Fork 存储库,报告错误,以及在 GitHub 上提交您的代码。但是,我们希望开发者遵循我们的指南,才能更好的做出贡献。 + +## 报告错误 + +- 任何用户都可以通过 **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)** 向我们报告错误。请您对所遇到的问题进行**详细描述**,最好提供重现错误的详细步骤。 +- 欢迎提供包含由 Bug 生成的日志文件的附录。 + +## 需要强调的代码提交规则 + +- 在提交代码之前,需要**同意贡献者许可协议(CLA)**。点击 [TaosData CLA](https://cla-assistant.io/taosdata/TDengine) 阅读并签署协议。如果您不接受该协议,请停止提交。 +- 请在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中解决问题或添加注册功能。 +- 如果在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中没有找到相应的问题或功能,请**创建一个新的 issue**。 +- 
将代码提交到我们的存储库时,请创建**包含问题编号的 PR**。 + +## 贡献指南 + +1. 请用友好的语气书写。 + +2. **主动语态**总体上优于被动语态。主动语态中的句子会突出执行动作的人,而不是被动语态突出动作的接受者。 + +3. 文档写作建议 + +- 正确拼写产品名称 “TDengine”。 “TD” 用大写字母,“TD” 和 “engine” 之间没有空格 **(正确拼写:TDengine)**。 +- 在句号或其他标点符号后只留一个空格。 + +4. 尽量**使用简单句**,而不是复杂句。 + +## 给贡献者的礼品 + +只要您是为 TDengine 做贡献的开发者,不管是代码贡献、修复 bug 或功能请求,还是文档更改,您都将会获得一份**特别的贡献者纪念品礼物**! + +
+ + + + +TDengine 社区致力于让更多的开发者理解和使用它。 +请填写**贡献者提交表**以选择您想收到的礼物。 + +- [贡献者提交表](https://page.ma.scrmtech.com/form/index?pf_uid=27715_2095&id=12100) + +## 联系我们 + +如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:TDengineECO diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 12e806c87a..d7df07f06a 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -1,6 +1,7 @@ import hudson.model.Result import hudson.model.*; import jenkins.model.CauseOfInterruption +docs_only=0 node { } @@ -29,6 +30,49 @@ def abort_previous(){ if (buildNumber > 1) milestone(buildNumber - 1) milestone(buildNumber) } +def check_docs() { + if (env.CHANGE_URL =~ /\/TDengine\//) { + sh ''' + hostname + date + env + ''' + sh ''' + cd ${WKC} + git reset --hard + git clean -fxd + rm -rf examples/rust/ + git remote prune origin + git fetch + ''' + script { + sh ''' + cd ${WKC} + git checkout ''' + env.CHANGE_TARGET + ''' + ''' + } + sh ''' + cd ${WKC} + git remote prune origin + git pull >/dev/null + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + ''' + def file_changed = sh ( + script: ''' + cd ${WKC} + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || : + ''', + returnStdout: true + ).trim() + if (file_changed == '') { + echo "docs PR" + docs_only=1 + } else { + echo file_changed + } + } +} def pre_test(){ sh ''' hostname @@ -307,10 +351,27 @@ pipeline { WKPY = '/var/lib/jenkins/workspace/taos-connector-python' } stages { + stage('check') { + when { + allOf { + not { expression { env.CHANGE_BRANCH =~ /docs\// }} + not { expression { env.CHANGE_URL =~ /\/TDinternal\// }} + } + } + parallel { + stage('check docs') { + agent{label " worker03 || slave215 || slave217 || slave219 || Mac_catalina "} + steps { + check_docs() + } + } + } + } stage('run test') { when { allOf { not { expression { env.CHANGE_BRANCH =~ /docs\// }} + expression { docs_only == 0 } } } parallel { diff --git a/README-CN.md b/README-CN.md index 6bfab379fe..e30e38ae78 100644 --- a/README-CN.md +++ b/README-CN.md @@ -210,14 +210,14 @@ cmake .. -G "NMake Makefiles" nmake ``` -### macOS 系统 + # 安装 diff --git a/README.md b/README.md index 6baabed7be..02dd9984e8 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) - English | [简体中文](README-CN.md) | We are hiring, check [here](https://tdengine.com/careers) # What is TDengine? @@ -42,7 +41,7 @@ For user manual, system design and architecture, please refer to [TDengine Docum At the moment, TDengine server supports running on Linux, Windows systems.Any OS application can also choose the RESTful interface of taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU , and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. -You can choose to install through source code according to your needs, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubenetes](https://docs.taosdata.com/deployment/k8s/) to install. This quick guide only applies to installing from source. 
+You can choose to install through source code according to your needs, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubernetes](https://docs.taosdata.com/deployment/k8s/) to install. This quick guide only applies to installing from source. TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. @@ -58,7 +57,6 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev #### Install build dependencies for taosTools - To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed. ```bash @@ -82,14 +80,13 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel #### Install build dependencies for taosTools on CentOS - #### CentOS 7.9 ``` sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel ``` -#### CentOS 8/Rocky Linux +#### CentOS 8/Rocky Linux ``` sudo yum install -y epel-release @@ -100,14 +97,14 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it leads a cmake prompt libsnappy not found. But snappy still works well. -If the powertools installation fails, you can try to use: +If the PowerTools installation fails, you can try to use: + ``` -sudo yum config-manager --set-enabled Powertools +sudo yum config-manager --set-enabled powertools ``` ### Setup golang environment - TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup. Please use version 1.14+. For the user in China, we recommend using a proxy to accelerate package downloading. @@ -125,7 +122,7 @@ cmake .. -DBUILD_HTTP=false ### Setup rust environment -TDengine includes a few compoments developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup. +TDengine includes a few components developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup. ## Get the source codes @@ -136,7 +133,6 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` - You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. ``` @@ -146,14 +142,12 @@ You can modify the file ~/.gitconfig to use ssh protocol instead of https for be ## Special Note - [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. 
## Build TDengine ### On Linux platform - You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: ```bash @@ -169,7 +163,6 @@ cmake .. -DBUILD_TOOLS=true make ``` - You can use Jemalloc as memory allocator instead of glibc: ``` @@ -218,14 +211,14 @@ cmake .. -G "NMake Makefiles" nmake ``` -### On macOS platform + # Installing @@ -237,7 +230,7 @@ After building successfully, TDengine can be installed by sudo make install ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section. +Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section. Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it. @@ -309,7 +302,7 @@ Query OK, 2 row(s) in set (0.001700s) ## Official Connectors -TDengine provides abundant developing tools for users to develop on TDengine. include C/C++、Java、Python、Go、Node.js、C# 、RESTful ,Follow the links below to find your desired connectors and relevant documentation. +TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. - [Java](https://docs.taosdata.com/reference/connector/java/) - [C/C++](https://docs.taosdata.com/reference/connector/cpp/) diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md deleted file mode 100644 index 28f52be59a..0000000000 --- a/docs/zh/12-taos-sql/14-stream.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -sidebar_label: 流式计算 -title: 流式计算 ---- - - -## 创建流式计算 - -```sql -CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery -stream_options: { - TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time] - WATERMARK time -} - -``` - -其中 subquery 是 select 普通查询语法的子集: - -```sql -subquery: SELECT select_list - from_clause - [WHERE condition] - [PARTITION BY tag_list] - [window_clause] -``` - -支持会话窗口、状态窗口与滑动窗口,其中,会话窗口与状态窗口搭配超级表时必须与partition by tbname一起使用 - -```sql -window_clause: { - SESSION(ts_col, tol_val) - | STATE_WINDOW(col) - | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] -} -``` - -其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。 - -窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished) - -例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。 - -```sql -CREATE STREAM avg_vol_s INTO avg_vol AS -SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); -``` - -## 流式计算的 partition - -可以使用 PARTITION BY TBNAME 或 PARTITION BY tag,对一个流进行多分区的计算,每个分区的时间线与时间窗口是独立的,会各自聚合,并写入到目的表中的不同子表。 - -不带 PARTITION BY 选项时,所有的数据将写入到一张子表。 - -流式计算创建的超级表有唯一的 tag 列 groupId,每个 partition 会被分配唯一 groupId。与 schemaless 写入一致,我们通过 MD5 计算子表名,并自动创建它。 - -## 删除流式计算 - -```sql -DROP STREAM [IF NOT EXISTS] stream_name; -``` - -仅删除流式计算任务,由流式计算写入的数据不会被删除。 - -## 展示流式计算 - -```sql -SHOW STREAMS; -``` - -若要展示更详细的信息,可以使用: - -```sql -SELECT * from performance_schema.`perf_streams`; -``` - -## 流式计算的触发模式 - -在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。 - -对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式: - -1. AT_ONCE:写入立即触发 - -2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用) - -3. 
MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 - -由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。 - -因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。 - -MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算 - -## 流式计算的窗口关闭 - -流式计算以事件时间(插入记录中的时间戳主键)为基准计算窗口关闭,而非以 TDengine 服务器的时间,以事件时间为基准,可以避免客户端与服务器时间不一致带来的问题,能够解决乱序数据写入等等问题。流式计算还提供了 watermark 来定义容忍的乱序程度。 - -在创建流时,可以在 stream_option 中指定 watermark,它定义了数据乱序的容忍上界。 - -流式计算通过 watermark 来度量对乱序数据的容忍程度,watermark 默认为 0。 - -T = 最新事件时间 - watermark - -每次写入的数据都会以上述公式更新窗口关闭时间,并将窗口结束时间 < T 的所有打开的窗口关闭,若触发模式为 WINDOW_CLOSE 或 MAX_DELAY,则推送窗口聚合结果。 - - -![TDengine 流式计算窗口关闭示意图](./watermark.webp) - - -图中,纵轴表示不同时刻,对于不同时刻,我们画出其对应的 TDengine 收到的数据,即为横轴。 - -横轴上的数据点表示已经收到的数据,其中蓝色的点表示事件时间(即数据中的时间戳主键)最后的数据,该数据点减去定义的 watermark 时间,得到乱序容忍的上界 T。 - -所有结束时间小于 T 的窗口都将被关闭(图中以灰色方框标记)。 - -T2 时刻,乱序数据(黄色的点)到达 TDengine,由于有 watermark 的存在,这些数据进入的窗口并未被关闭,因此可以被正确处理。 - -T3 时刻,最新事件到达,T 向后推移超过了第二个窗口关闭的时间,该窗口被关闭,乱序数据被正确处理。 - -在 window_close 或 max_delay 模式下,窗口关闭直接影响推送结果。在 at_once 模式下,窗口关闭只与内存占用有关。 - - -## 流式计算的过期数据处理策略 - -对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据. - -TDengine 对于过期数据提供两种处理方式,由 IGNORE EXPIRED 选项指定: - -1. 重新计算,即 IGNORE EXPIRED 0:默认配置,从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果 - -2. 直接丢弃, 即 IGNORE EXPIRED 1:忽略过期数据 - - -无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。 diff --git a/examples/JDBC/JDBCDemo/pom.xml b/examples/JDBC/JDBCDemo/pom.xml index 8cf0356721..807ceb0f24 100644 --- a/examples/JDBC/JDBCDemo/pom.xml +++ b/examples/JDBC/JDBCDemo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 3.0.0 diff --git a/examples/JDBC/SpringJdbcTemplate/pom.xml b/examples/JDBC/SpringJdbcTemplate/pom.xml index eac3dec0a9..6e4941b4f1 100644 --- a/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 3.0.0 diff --git a/examples/JDBC/SpringJdbcTemplate/readme.md b/examples/JDBC/SpringJdbcTemplate/readme.md index b70a6565f8..f59bcdbeb5 100644 --- a/examples/JDBC/SpringJdbcTemplate/readme.md +++ b/examples/JDBC/SpringJdbcTemplate/readme.md @@ -10,7 +10,7 @@ ```xml - + @@ -28,5 +28,5 @@ mvn clean package ``` 打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试: ```shell -java -jar SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar +java -jar target/SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar ``` \ No newline at end of file diff --git a/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java b/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java index 6942d62a83..ce26b7504a 100644 --- a/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java +++ b/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java @@ -28,7 +28,7 @@ public class App { //use database executor.doExecute("use test"); // create table - executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)"); + executor.doExecute("create table if not exists test.weather (ts timestamp, temperature float, humidity int)"); WeatherDao weatherDao = ctx.getBean(WeatherDao.class); Weather weather = new Weather(new Timestamp(new Date().getTime()), random.nextFloat() * 50.0f, random.nextInt(100)); diff --git a/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java b/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java 
index 29d0f79fd4..782fcbe0eb 100644 --- a/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java +++ b/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java @@ -41,7 +41,7 @@ public class BatcherInsertTest { //use database executor.doExecute("use test"); // create table - executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)"); + executor.doExecute("create table if not exists test.weather (ts timestamp, temperature float, humidity int)"); } @Test diff --git a/examples/JDBC/connectionPools/README-cn.md b/examples/JDBC/connectionPools/README-cn.md index 9b26df3c2e..6e589418b1 100644 --- a/examples/JDBC/connectionPools/README-cn.md +++ b/examples/JDBC/connectionPools/README-cn.md @@ -13,13 +13,13 @@ ConnectionPoolDemo的程序逻辑: ### 如何运行这个例子: ```shell script -mvn clean package assembly:single -java -jar target/connectionPools-1.0-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 +mvn clean package +java -jar target/ConnectionPoolDemo-jar-with-dependencies.jar -host 127.0.0.1 ``` 使用mvn运行ConnectionPoolDemo的main方法,可以指定参数 ```shell script Usage: -java -jar target/connectionPools-1.0-SNAPSHOT-jar-with-dependencies.jar +java -jar target/ConnectionPoolDemo-jar-with-dependencies.jar -host : hostname -poolType -poolSize diff --git a/examples/JDBC/connectionPools/pom.xml b/examples/JDBC/connectionPools/pom.xml index 99a7892a25..61717cf112 100644 --- a/examples/JDBC/connectionPools/pom.xml +++ b/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 3.0.0 diff --git a/examples/JDBC/mybatisplus-demo/pom.xml b/examples/JDBC/mybatisplus-demo/pom.xml index ad6a63e800..5555145958 100644 --- a/examples/JDBC/mybatisplus-demo/pom.xml +++ b/examples/JDBC/mybatisplus-demo/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 3.0.0 diff --git a/examples/JDBC/mybatisplus-demo/readme b/examples/JDBC/mybatisplus-demo/readme new file mode 100644 index 0000000000..b31b6c34bf --- /dev/null +++ b/examples/JDBC/mybatisplus-demo/readme @@ -0,0 +1,14 @@ +# 使用说明 + +## 创建使用db +```shell +$ taos + +> create database mp_test +``` + +## 执行测试用例 + +```shell +$ mvn clean test +``` \ No newline at end of file diff --git a/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java b/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java index 6733cbded9..1f0338db34 100644 --- a/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java +++ b/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java @@ -2,7 +2,17 @@ package com.taosdata.example.mybatisplusdemo.mapper; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.taosdata.example.mybatisplusdemo.domain.Weather; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Update; public interface WeatherMapper extends BaseMapper { + @Update("CREATE TABLE if not exists weather(ts timestamp, temperature float, humidity int, location nchar(100))") + int createTable(); + + @Insert("insert into weather (ts, temperature, humidity, location) values(#{ts}, #{temperature}, #{humidity}, #{location})") + int insertOne(Weather one); + + @Update("drop table if exists weather") + void dropTable(); } diff --git 
a/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml index 38180c6d75..985ed1675e 100644 --- a/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml +++ b/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml @@ -2,7 +2,7 @@ spring: datasource: driver-class-name: com.taosdata.jdbc.TSDBDriver url: jdbc:TAOS://localhost:6030/mp_test?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 - user: root + username: root password: taosdata druid: diff --git a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java index 4331d15d34..4d9dbf8d2f 100644 --- a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java +++ b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java @@ -82,27 +82,15 @@ public class TemperatureMapperTest { Assert.assertEquals(1, affectRows); } - /*** - * test SelectOne - * **/ - @Test - public void testSelectOne() { - QueryWrapper wrapper = new QueryWrapper<>(); - wrapper.eq("location", "beijing"); - Temperature one = mapper.selectOne(wrapper); - System.out.println(one); - Assert.assertNotNull(one); - } - /*** * test select By map * ***/ @Test public void testSelectByMap() { Map map = new HashMap<>(); - map.put("location", "beijing"); + map.put("location", "北京"); List temperatures = mapper.selectByMap(map); - Assert.assertEquals(1, temperatures.size()); + Assert.assertTrue(temperatures.size() > 1); } /*** @@ -120,7 +108,7 @@ public class TemperatureMapperTest { @Test public void testSelectCount() { int count = mapper.selectCount(null); - Assert.assertEquals(5, count); + Assert.assertEquals(10, count); } /**** diff --git a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java index 1699344552..dba8abd1ed 100644 --- a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java +++ b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java @@ -6,6 +6,7 @@ import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.taosdata.example.mybatisplusdemo.domain.Weather; import org.junit.Assert; import org.junit.Test; +import org.junit.Before; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; @@ -26,6 +27,18 @@ public class WeatherMapperTest { @Autowired private WeatherMapper mapper; + @Before + public void createTable(){ + mapper.dropTable(); + mapper.createTable(); + Weather one = new Weather(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(12.22f); + one.setLocation("望京"); + one.setHumidity(100); + mapper.insertOne(one); + } + @Test public void testSelectList() { List weathers = mapper.selectList(null); @@ -46,20 +59,20 @@ public class WeatherMapperTest { @Test public void testSelectOne() { QueryWrapper wrapper = new QueryWrapper<>(); - wrapper.eq("location", "beijing"); + wrapper.eq("location", "望京"); Weather one = mapper.selectOne(wrapper); System.out.println(one); 
Assert.assertEquals(12.22f, one.getTemperature(), 0.00f); - Assert.assertEquals("beijing", one.getLocation()); + Assert.assertEquals("望京", one.getLocation()); } - @Test - public void testSelectByMap() { - Map map = new HashMap<>(); - map.put("location", "beijing"); - List weathers = mapper.selectByMap(map); - Assert.assertEquals(1, weathers.size()); - } + // @Test + // public void testSelectByMap() { + // Map map = new HashMap<>(); + // map.put("location", "beijing"); + // List weathers = mapper.selectByMap(map); + // Assert.assertEquals(1, weathers.size()); + // } @Test public void testSelectObjs() { diff --git a/examples/JDBC/readme.md b/examples/JDBC/readme.md index 9a017f4fea..c7d7875308 100644 --- a/examples/JDBC/readme.md +++ b/examples/JDBC/readme.md @@ -10,4 +10,4 @@ | 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces | -more detail: https://www.taosdata.com/cn//documentation20/connector-java/ \ No newline at end of file +more detail: https://docs.taosdata.com/reference/connector/java/ \ No newline at end of file diff --git a/examples/JDBC/springbootdemo/pom.xml b/examples/JDBC/springbootdemo/pom.xml index 9126813b67..ee15f6013e 100644 --- a/examples/JDBC/springbootdemo/pom.xml +++ b/examples/JDBC/springbootdemo/pom.xml @@ -68,7 +68,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 3.0.0 diff --git a/examples/JDBC/springbootdemo/readme.md b/examples/JDBC/springbootdemo/readme.md index 67a28947d2..a3942a6a51 100644 --- a/examples/JDBC/springbootdemo/readme.md +++ b/examples/JDBC/springbootdemo/readme.md @@ -1,10 +1,11 @@ ## TDengine SpringBoot + Mybatis Demo +## 需要提前创建 test 数据库 ### 配置 application.properties ```properties # datasource config spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver -spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test spring.datasource.username=root spring.datasource.password=taosdata diff --git a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java index ed720fe6c0..3ee5b597ab 100644 --- a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java +++ b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java @@ -6,7 +6,6 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.*; import java.util.List; -import java.util.Map; @RequestMapping("/weather") @RestController diff --git a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml index 91938ca24e..99d5893ec1 100644 --- a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml +++ b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml @@ -10,8 +10,7 @@ diff --git a/examples/JDBC/springbootdemo/src/main/resources/application.properties b/examples/JDBC/springbootdemo/src/main/resources/application.properties index 06daa81bbb..bf21047395 100644 --- a/examples/JDBC/springbootdemo/src/main/resources/application.properties +++ b/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -5,7 +5,7 @@ 
#spring.datasource.password=taosdata # datasource config - JDBC-RESTful spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver -spring.datasource.url=jdbc:TAOS-RS://localhsot:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 +spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 spring.datasource.username=root spring.datasource.password=taosdata spring.datasource.druid.initial-size=5 diff --git a/examples/JDBC/taosdemo/pom.xml b/examples/JDBC/taosdemo/pom.xml index 07fd4a3576..724ecc7407 100644 --- a/examples/JDBC/taosdemo/pom.xml +++ b/examples/JDBC/taosdemo/pom.xml @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.20 + 3.0.0 diff --git a/examples/JDBC/taosdemo/readme.md b/examples/JDBC/taosdemo/readme.md index 451fa2960a..e5f4eb132b 100644 --- a/examples/JDBC/taosdemo/readme.md +++ b/examples/JDBC/taosdemo/readme.md @@ -2,9 +2,9 @@ cd tests/examples/JDBC/taosdemo mvn clean package -Dmaven.test.skip=true # 先建表,再插入的 -java -jar target/taosdemo-2.0-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 +java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 # 不建表,直接插入的 -java -jar target/taosdemo-2.0-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 +java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 ``` 需求: diff --git a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java index d4f5ff2688..6854054703 100644 --- a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java +++ b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java @@ -32,8 +32,10 @@ public class TaosDemoApplication { System.exit(0); } // 初始化 - final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password); - if (config.executeSql != null && !config.executeSql.isEmpty() && !config.executeSql.replaceAll("\\s", "").isEmpty()) { + final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, + config.password); + if (config.executeSql != null && !config.executeSql.isEmpty() + && !config.executeSql.replaceAll("\\s", "").isEmpty()) { Thread task = new Thread(new SqlExecuteTask(dataSource, config.executeSql)); task.start(); try { @@ -55,7 +57,7 @@ public class TaosDemoApplication { databaseParam.put("keep", Integer.toString(config.keep)); databaseParam.put("days", Integer.toString(config.days)); databaseParam.put("replica", 
Integer.toString(config.replica)); - //TODO: other database parameters + // TODO: other database parameters databaseService.createDatabase(databaseParam); databaseService.useDatabase(config.database); long end = System.currentTimeMillis(); @@ -70,11 +72,13 @@ public class TaosDemoApplication { if (config.database != null && !config.database.isEmpty()) superTableMeta.setDatabase(config.database); } else if (config.numOfFields == 0) { - String sql = "create table " + config.database + "." + config.superTable + " (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + String sql = "create table " + config.database + "." + config.superTable + + " (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; superTableMeta = SuperTableMetaGenerator.generate(sql); } else { // create super table with specified field size and tag size - superTableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags); + superTableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, + config.prefixOfFields, config.numOfTags, config.prefixOfTags); } /**********************************************************************************/ // 建表 @@ -84,7 +88,8 @@ public class TaosDemoApplication { superTableService.create(superTableMeta); if (!config.autoCreateTable) { // 批量建子表 - subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, config.numOfThreadsForCreate); + subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, + config.numOfThreadsForCreate); } } end = System.currentTimeMillis(); @@ -93,7 +98,7 @@ public class TaosDemoApplication { // 插入 long tableSize = config.numOfTables; int threadSize = config.numOfThreadsForInsert; - long startTime = getProperStartTime(config.startTime, config.keep); + long startTime = getProperStartTime(config.startTime, config.days); if (tableSize < threadSize) threadSize = (int) tableSize; @@ -101,13 +106,13 @@ public class TaosDemoApplication { start = System.currentTimeMillis(); // multi threads to insert - int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config); + int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, + config); end = System.currentTimeMillis(); logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms"); /**********************************************************************************/ // 查询 - /**********************************************************************************/ // 删除表 if (config.dropTable) { diff --git a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java index efabff6afe..ab0a1125d2 100644 --- a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java +++ b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java @@ -1,7 +1,5 @@ package com.taosdata.taosdemo.service; -import com.taosdata.jdbc.utils.SqlSyntaxValidator; - import javax.sql.DataSource; import java.sql.*; import java.util.ArrayList; @@ -23,10 +21,6 @@ public class QueryService { Boolean[] ret = new Boolean[sqls.length]; for (int i = 0; i < sqls.length; i++) { ret[i] = true; - if 
(!SqlSyntaxValidator.isValidForExecuteQuery(sqls[i])) { - ret[i] = false; - continue; - } try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { stmt.executeQuery(sqls[i]); } catch (SQLException e) { diff --git a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java index a60f0641d3..7651d1e318 100644 --- a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java +++ b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java @@ -15,9 +15,12 @@ public class SqlSpeller { StringBuilder sb = new StringBuilder(); sb.append("create database if not exists ").append(map.get("database")).append(" "); if (map.containsKey("keep")) - sb.append("keep ").append(map.get("keep")).append(" "); - if (map.containsKey("days")) - sb.append("days ").append(map.get("days")).append(" "); + sb.append("keep "); + if (map.containsKey("days")) { + sb.append(map.get("days")).append("d "); + } else { + sb.append(" "); + } if (map.containsKey("replica")) sb.append("replica ").append(map.get("replica")).append(" "); if (map.containsKey("cache")) @@ -29,7 +32,7 @@ public class SqlSpeller { if (map.containsKey("maxrows")) sb.append("maxrows ").append(map.get("maxrows")).append(" "); if (map.containsKey("precision")) - sb.append("precision ").append(map.get("precision")).append(" "); + sb.append("precision '").append(map.get("precision")).append("' "); if (map.containsKey("comp")) sb.append("comp ").append(map.get("comp")).append(" "); if (map.containsKey("walLevel")) @@ -46,11 +49,13 @@ public class SqlSpeller { // create table if not exists xx.xx using xx.xx tags(x,x,x) public static String createTableUsingSuperTable(SubTableMeta subTableMeta) { StringBuilder sb = new StringBuilder(); - sb.append("create table if not exists ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getName()).append(" "); - sb.append("using ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getSupertable()).append(" "); -// String tagStr = subTableMeta.getTags().stream().filter(Objects::nonNull) -// .map(tagValue -> tagValue.getName() + " '" + tagValue.getValue() + "' ") -// .collect(Collectors.joining(",", "(", ")")); + sb.append("create table if not exists ").append(subTableMeta.getDatabase()).append(".") + .append(subTableMeta.getName()).append(" "); + sb.append("using ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getSupertable()) + .append(" "); + // String tagStr = subTableMeta.getTags().stream().filter(Objects::nonNull) + // .map(tagValue -> tagValue.getName() + " '" + tagValue.getValue() + "' ") + // .collect(Collectors.joining(",", "(", ")")); sb.append("tags ").append(tagValues(subTableMeta.getTags())); return sb.toString(); } @@ -63,7 +68,7 @@ public class SqlSpeller { return sb.toString(); } - //f1, f2, f3 + // f1, f2, f3 private static String fieldValues(List fields) { return IntStream.range(0, fields.size()).mapToObj(i -> { if (i == 0) { @@ -73,13 +78,13 @@ public class SqlSpeller { } }).collect(Collectors.joining(",", "(", ")")); -// return fields.stream() -// .filter(Objects::nonNull) -// .map(fieldValue -> "'" + fieldValue.getValue() + "'") -// .collect(Collectors.joining(",", "(", ")")); + // return fields.stream() + // .filter(Objects::nonNull) + // .map(fieldValue -> "'" + fieldValue.getValue() + "'") + // .collect(Collectors.joining(",", "(", ")")); } - //(f1, f2, f3),(f1, 
f2, f3) + // (f1, f2, f3),(f1, f2, f3) private static String rowValues(List rowValues) { return rowValues.stream().filter(Objects::nonNull) .map(rowValue -> fieldValues(rowValue.getFields())) @@ -89,8 +94,10 @@ public class SqlSpeller { // insert into xx.xxx using xx.xx tags(x,x,x) values(x,x,x),(x,x,x)... public static String insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) { StringBuilder sb = new StringBuilder(); - sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName()).append(" "); - sb.append("using ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getSupertable()).append(" "); + sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName()) + .append(" "); + sb.append("using ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getSupertable()) + .append(" "); sb.append("tags ").append(tagValues(subTableValue.getTags()) + " "); sb.append("values ").append(rowValues(subTableValue.getValues())); return sb.toString(); @@ -126,7 +133,8 @@ public class SqlSpeller { // create table if not exists xx.xx (f1 xx,f2 xx...) tags(t1 xx, t2 xx...) public static String createSuperTable(SuperTableMeta tableMetadata) { StringBuilder sb = new StringBuilder(); - sb.append("create table if not exists ").append(tableMetadata.getDatabase()).append(".").append(tableMetadata.getName()); + sb.append("create table if not exists ").append(tableMetadata.getDatabase()).append(".") + .append(tableMetadata.getName()); String fields = tableMetadata.getFields().stream() .filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ") .collect(Collectors.joining(",", "(", ")")); @@ -139,10 +147,10 @@ public class SqlSpeller { return sb.toString(); } - public static String createTable(TableMeta tableMeta) { StringBuilder sb = new StringBuilder(); - sb.append("create table if not exists ").append(tableMeta.getDatabase()).append(".").append(tableMeta.getName()).append(" "); + sb.append("create table if not exists ").append(tableMeta.getDatabase()).append(".").append(tableMeta.getName()) + .append(" "); String fields = tableMeta.getFields().stream() .filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ") .collect(Collectors.joining(",", "(", ")")); @@ -179,16 +187,17 @@ public class SqlSpeller { public static String insertMultiTableMultiValuesWithColumns(List tables) { StringBuilder sb = new StringBuilder(); sb.append("insert into ").append(tables.stream().filter(Objects::nonNull) - .map(table -> table.getDatabase() + "." + table.getName() + " " + columnNames(table.getColumns()) + " values " + rowValues(table.getValues())) + .map(table -> table.getDatabase() + "." + table.getName() + " " + columnNames(table.getColumns()) + + " values " + rowValues(table.getValues())) .collect(Collectors.joining(" "))); return sb.toString(); } public static String insertMultiTableMultiValues(List tables) { StringBuilder sb = new StringBuilder(); - sb.append("insert into ").append(tables.stream().filter(Objects::nonNull).map(table -> - table.getDatabase() + "." + table.getName() + " values " + rowValues(table.getValues()) - ).collect(Collectors.joining(" "))); + sb.append("insert into ").append(tables.stream().filter(Objects::nonNull) + .map(table -> table.getDatabase() + "." 
+ table.getName() + " values " + rowValues(table.getValues())) + .collect(Collectors.joining(" "))); return sb.toString(); } } diff --git a/examples/JDBC/taosdemo/src/main/resources/application.properties b/examples/JDBC/taosdemo/src/main/resources/application.properties index 488185196f..4f550f6523 100644 --- a/examples/JDBC/taosdemo/src/main/resources/application.properties +++ b/examples/JDBC/taosdemo/src/main/resources/application.properties @@ -1,5 +1,5 @@ -jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver -#jdbc.driver=com.taosdata.jdbc.TSDBDriver +# jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver +jdbc.driver=com.taosdata.jdbc.TSDBDriver hikari.maximum-pool-size=20 hikari.minimum-idle=20 hikari.max-lifetime=0 \ No newline at end of file diff --git a/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java b/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java deleted file mode 100644 index 1f52198d68..0000000000 --- a/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java +++ /dev/null @@ -1,31 +0,0 @@ -package com.taosdata.taosdemo.service; - -import com.taosdata.taosdemo.domain.TableMeta; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -public class TableServiceTest { - private TableService tableService; - - private List tables; - - @Before - public void before() { - tables = new ArrayList<>(); - for (int i = 0; i < 1; i++) { - TableMeta tableMeta = new TableMeta(); - tableMeta.setDatabase("test"); - tableMeta.setName("weather" + (i + 1)); - tables.add(tableMeta); - } - } - - @Test - public void testCreate() { - tableService.create(tables); - } - -} \ No newline at end of file diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c index 2fcf4dd62c..1c9d11b755 100644 --- a/examples/c/stream_demo.c +++ b/examples/c/stream_demo.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ +// clang-format off #include #include #include @@ -94,13 +95,8 @@ int32_t create_stream() { } taos_free_result(pRes); - /*const char* sql = "select min(k), max(k), sum(k) from tu1";*/ - /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/ - /*const char* sql = "select sum(k) from tu1 interval(10m)";*/ - /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/ pRes = taos_query(pConn, - "create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, " - "count(k) from st1 partition by tbname interval(20s) "); + "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, avg(k) from st1 partition by tbname interval(10s)"); if (taos_errno(pRes) != 0) { printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/include/common/tcommon.h b/include/common/tcommon.h index dbe020f7ec..a071516fbf 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -44,6 +44,30 @@ enum { ) // clang-format on +typedef struct { + TSKEY ts; + uint64_t groupId; +} SWinKey; + +static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { + SWinKey* pWin1 = (SWinKey*)pKey1; + SWinKey* pWin2 = (SWinKey*)pKey2; + + if (pWin1->groupId > pWin2->groupId) { + return 1; + } else if (pWin1->groupId < pWin2->groupId) { + return -1; + } + + if (pWin1->ts > pWin2->ts) { + return 1; + } else if (pWin1->ts < pWin2->ts) { + return -1; + } + + return 0; +} + enum { TMQ_MSG_TYPE__DUMMY = 0, TMQ_MSG_TYPE__POLL_RSP, @@ -181,7 +205,7 @@ typedef struct SColumn { int16_t slotId; char name[TSDB_COL_NAME_LEN]; - int8_t flag; // column type: normal column, tag, or user-input column (integer/float/string) + int16_t colType; // column type: normal column, tag, or window column int16_t type; int32_t bytes; uint8_t precision; diff --git a/include/common/tglobal.h b/include/common/tglobal.h index cd74ffd477..03e15ed8e7 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -66,6 +66,7 @@ extern int32_t tsNumOfVnodeStreamThreads; extern int32_t tsNumOfVnodeFetchThreads; extern int32_t tsNumOfVnodeWriteThreads; extern int32_t tsNumOfVnodeSyncThreads; +extern int32_t tsNumOfVnodeRsmaThreads; extern int32_t tsNumOfQnodeQueryThreads; extern int32_t tsNumOfQnodeFetchThreads; extern int32_t tsNumOfSnodeSharedThreads; @@ -130,6 +131,7 @@ extern int32_t tsMqRebalanceInterval; extern int32_t tsTtlUnit; extern int32_t tsTtlPushInterval; extern int32_t tsGrantHBInterval; +extern int32_t tsUptimeInterval; #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index f870bd161f..8f199c72f7 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2667,31 +2667,6 @@ typedef struct { int32_t padding; } SRSmaExecMsg; -typedef struct { - int64_t suid; - int8_t level; -} SRSmaFetchMsg; - -static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) { - if (tStartEncode(pCoder) < 0) return -1; - - if (tEncodeI64(pCoder, pReq->suid) < 0) return -1; - if (tEncodeI8(pCoder, pReq->level) < 0) return -1; - - tEndEncode(pCoder); - return 0; -} - -static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) { - if (tStartDecode(pCoder) < 0) return -1; - - if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1; - if (tDecodeI8(pCoder, &pReq->level) < 0) return -1; - - tEndDecode(pCoder); - return 0; -} - typedef struct { 
int8_t version; // for compatibility(default 0) int8_t intervalUnit; // MACRO: TIME_UNIT_XXX diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 16d5965759..006ba7f21b 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -170,6 +170,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_VND_MSG) @@ -201,7 +202,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp) - TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_EXEC_RSMA, "vnode-exec-rsma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp) TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index a64815f14f..1ce88905c2 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -29,7 +29,7 @@ typedef void* DataSinkHandle; struct SRpcMsg; struct SSubplan; -typedef struct SReadHandle { +typedef struct { void* tqReader; void* meta; void* config; @@ -41,6 +41,7 @@ typedef struct SReadHandle { bool initTableReader; bool initTqReader; int32_t numOfVgroups; + void* pStateBackend; } SReadHandle; // in queue mode, data streams are seperated by msg @@ -78,8 +79,8 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO /** * @brief Cleanup SSDataBlock for StreamScanInfo - * - * @param tinfo + * + * @param tinfo */ void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo); @@ -163,7 +164,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet); -int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/); +int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/); int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len); diff --git a/include/libs/function/function.h b/include/libs/function/function.h index e708a2c42d..d5da306fd2 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -142,6 +142,7 @@ typedef struct SqlFunctionCtx { struct SSDataBlock *pDstBlock; // used by indifinite rows function to set selectivity int32_t curBufPage; bool increase; + bool isStream; char udfName[TSDB_FUNC_NAME_LEN]; } SqlFunctionCtx; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index e1f86bae58..3a1eaf289e 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -57,7 +57,9 @@ typedef enum EColumnType { COLUMN_TYPE_COLUMN = 1, COLUMN_TYPE_TAG, COLUMN_TYPE_TBNAME, - COLUMN_TYPE_WINDOW_PC, + COLUMN_TYPE_WINDOW_START, + COLUMN_TYPE_WINDOW_END, + COLUMN_TYPE_WINDOW_DURATION, COLUMN_TYPE_GROUP_KEY } EColumnType; @@ -276,6 +278,7 @@ typedef struct SSelectStmt { bool hasLastRowFunc; bool hasTimeLineFunc; bool 
hasUdaf; + bool hasStateKey; bool onlyHasKeepOrderFunc; bool groupSort; } SSelectStmt; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 384c6a289f..2c27509008 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -263,6 +263,14 @@ typedef struct { SArray* checkpointVer; } SStreamRecoveringState; +// incremental state storage +typedef struct { + SStreamTask* pOwner; + TDB* db; + TTB* pStateDb; + TXN txn; +} SStreamState; + typedef struct SStreamTask { int64_t streamId; int32_t taskId; @@ -312,6 +320,10 @@ typedef struct SStreamTask { // msg handle SMsgCb* pMsgCb; + + // state backend + SStreamState* pState; + } SStreamTask; int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo); @@ -507,7 +519,7 @@ typedef struct SStreamMeta { char* path; TDB* db; TTB* pTaskDb; - TTB* pStateDb; + TTB* pCheckpointDb; SHashObj* pTasks; SHashObj* pRecoverStatus; void* ahandle; @@ -528,6 +540,37 @@ int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); int32_t streamLoadTasks(SStreamMeta* pMeta); +SStreamState* streamStateOpen(char* path, SStreamTask* pTask); +void streamStateClose(SStreamState* pState); +int32_t streamStateBegin(SStreamState* pState); +int32_t streamStateCommit(SStreamState* pState); +int32_t streamStateAbort(SStreamState* pState); + +typedef struct { + TBC* pCur; +} SStreamStateCur; + +#if 1 +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateDel(SStreamState* pState, const SWinKey* key); +void streamFreeVal(void* val); + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); +void streamStateFreeCur(SStreamStateCur* pCur); + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); + +#endif + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 12d6127165..e39172d74e 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -49,7 +49,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) //common & util -#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013) +#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013) #define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) #define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) @@ -222,7 +222,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) #define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383) #define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) -#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) +#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) #define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) #define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) #define 
TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B) @@ -433,7 +433,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TQ_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0A03) #define TSDB_CODE_TQ_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0A04) #define TSDB_CODE_TQ_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0A05) -#define TSDB_CODE_TQ_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0A06) +#define TSDB_CODE_TQ_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0A06) #define TSDB_CODE_TQ_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0A07) #define TSDB_CODE_TQ_META_NO_SUCH_KEY TAOS_DEF_ERROR_CODE(0, 0x0A08) #define TSDB_CODE_TQ_META_KEY_NOT_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A09) @@ -490,7 +490,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT TAOS_DEF_ERROR_CODE(0, 0x2609) #define TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260A) #define TSDB_CODE_PAR_NOT_SELECTED_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260B) -#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C) +#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C) #define TSDB_CODE_PAR_TAGS_NOT_MATCHED TAOS_DEF_ERROR_CODE(0, 0x260D) #define TSDB_CODE_PAR_INVALID_TAG_NAME TAOS_DEF_ERROR_CODE(0, 0x260E) #define TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2610) diff --git a/include/util/tdef.h b/include/util/tdef.h index 6ce1571656..2bc821b873 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -386,7 +386,7 @@ typedef enum ELogicConditionType { #define TSDB_DEFAULT_EXPLAIN_VERBOSE false -#define TSDB_EXPLAIN_RESULT_ROW_SIZE 512 +#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16*1024) #define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN" #define TSDB_MAX_FIELD_LEN 16384 diff --git a/include/util/tqueue.h b/include/util/tqueue.h index 0f4f1db9ee..da409a90bb 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -76,6 +76,7 @@ void taosFreeQall(STaosQall *qall); int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall); int32_t taosGetQitem(STaosQall *qall, void **ppItem); void taosResetQitems(STaosQall *qall); +int32_t taosQallItemSize(STaosQall *qall); STaosQset *taosOpenQset(); void taosCloseQset(STaosQset *qset); diff --git a/packaging/release.bat b/packaging/release.bat index ffd3a68048..591227382f 100644 --- a/packaging/release.bat +++ b/packaging/release.bat @@ -40,10 +40,12 @@ if not exist %work_dir%\debug\ver-%2-x86 ( ) cd %work_dir%\debug\ver-%2-x64 call vcvarsall.bat x64 -cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x64 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64 cmake --build . rd /s /Q C:\TDengine cmake --install . 
+for /r c:\TDengine %%i in (*.dll) do signtool sign /f D:\\123.pfx /p taosdata %%i +for /r c:\TDengine %%i in (*.exe) do signtool sign /f D:\\123.pfx /p taosdata %%i if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1) cd %package_dir% iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release @@ -51,19 +53,7 @@ if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% faile iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1) -cd %work_dir%\debug\ver-%2-x86 -call vcvarsall.bat x86 -cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x86 -cmake --build . -rd /s /Q C:\TDengine -cmake --install . -if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1) -cd %package_dir% -@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release -@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1) -iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release -if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1) - +for /r ..\release %%i in (*.exe) do signtool sign /f d:\\123.pfx /p taosdata %%i goto EXIT0 :USAGE diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index f275ae0885..855dfb15ee 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -96,7 +96,12 @@ typedef struct { typedef struct SQueryExecMetric { int64_t start; // start timestamp, us - int64_t parsed; // start to parse, us + int64_t syntaxStart; // start to parse, us + int64_t syntaxEnd; // end to parse, us + int64_t ctgStart; // start to parse, us + int64_t ctgEnd; // end to parse, us + int64_t semanticEnd; + int64_t execEnd; int64_t send; // start to send to server, us int64_t rsp; // receive response from server, us } SQueryExecMetric; diff --git a/source/client/inc/clientLog.h b/source/client/inc/clientLog.h index d47edcd795..ec0a41a68f 100644 --- a/source/client/inc/clientLog.h +++ b/source/client/inc/clientLog.h @@ -29,6 +29,7 @@ extern "C" { #define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0) #define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0) #define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0) +#define tscPerf(...) 
do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0) #ifdef __cplusplus } diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index ff1b9322c9..1342e89b52 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -69,14 +69,25 @@ static void deregisterRequest(SRequestObj *pRequest) { int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1); int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1); - int64_t duration = taosGetTimestampUs() - pRequest->metric.start; + int64_t nowUs = taosGetTimestampUs(); + int64_t duration = nowUs - pRequest->metric.start; tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64 " ms, current:%d, app current:%d", pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst); if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) { + tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, + pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + pRequest->metric.execEnd - pRequest->metric.semanticEnd); atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { + tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, + pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + pRequest->metric.execEnd - pRequest->metric.semanticEnd); atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); } @@ -330,7 +341,6 @@ void doDestroyRequest(void *p) { schedulerFreeJob(&pRequest->body.queryJob, 0); taosMemoryFreeClear(pRequest->msgBuf); - taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFreeClear(pRequest->pDb); doFreeReqResultInfo(&pRequest->body.resInfo); @@ -349,6 +359,7 @@ void doDestroyRequest(void *p) { taosMemoryFree(pRequest->body.param); } + taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFree(pRequest); tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); } @@ -393,7 +404,9 @@ void taos_init_imp(void) { schedulerInit(); tscDebug("starting to initialize TAOS driver"); +#ifndef WINDOWS taosSetCoreDump(true); +#endif initTaskQueue(); fmFuncMgtInit(); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 9c086fc83e..998b9cee5c 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -238,6 +238,9 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC TSWAP(pRequest->targetTableList, (*pQuery)->pTargetTableList); } + taosArrayDestroy(cxt.pTableMetaPos); + taosArrayDestroy(cxt.pTableVgroupPos); + return code; } @@ -839,6 +842,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { } schedulerFreeJob(&pRequest->body.queryJob, 0); + + pRequest->metric.execEnd = taosGetTimestampUs(); } taosMemoryFree(pResult); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 0e95cd4d99..31ae443d5b 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -674,6 +674,8 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { 
taosArrayDestroy(pWrapper->catalogReq.pIndex); taosArrayDestroy(pWrapper->catalogReq.pUser); taosArrayDestroy(pWrapper->catalogReq.pTableIndex); + taosArrayDestroy(pWrapper->pCtx->pTableMetaPos); + taosArrayDestroy(pWrapper->pCtx->pTableVgroupPos); taosMemoryFree(pWrapper->pCtx); taosMemoryFree(pWrapper); } @@ -683,6 +685,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { SQuery *pQuery = pWrapper->pQuery; SRequestObj *pRequest = pWrapper->pRequest; + pRequest->metric.ctgEnd = taosGetTimestampUs(); + if (code == TSDB_CODE_SUCCESS) { code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery); pRequest->stableQuery = pQuery->stableQuery; @@ -691,6 +695,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { } } + pRequest->metric.semanticEnd = taosGetTimestampUs(); + if (code == TSDB_CODE_SUCCESS) { if (pQuery->haveResultSet) { setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols); @@ -782,12 +788,16 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SQuery *pQuery = NULL; + pRequest->metric.syntaxStart = taosGetTimestampUs(); + SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)}; code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq); if (code != TSDB_CODE_SUCCESS) { goto _error; } + pRequest->metric.syntaxEnd = taosGetTimestampUs(); + if (!updateMetaForce) { STscObj *pTscObj = pRequest->pTscObj; SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; @@ -814,6 +824,8 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { .requestObjRefId = pCxt->requestRid, .mgmtEps = pCxt->mgmtEpSet}; + pRequest->metric.ctgStart = taosGetTimestampUs(); + code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper, &pRequest->body.queryJob); pCxt = NULL; diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 65041e1f12..68a77a9f33 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -66,8 +66,9 @@ static const SSysDbTableSchema bnodesSchema[] = { }; static const SSysDbTableSchema clusterSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, }; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index adc5af1a17..bb2729c776 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -61,6 +61,7 @@ int32_t tsNumOfVnodeStreamThreads = 2; int32_t tsNumOfVnodeFetchThreads = 4; int32_t tsNumOfVnodeWriteThreads = 2; int32_t tsNumOfVnodeSyncThreads = 2; +int32_t tsNumOfVnodeRsmaThreads = 2; int32_t tsNumOfQnodeQueryThreads = 4; int32_t tsNumOfQnodeFetchThreads = 4; int32_t tsNumOfSnodeSharedThreads = 2; @@ -76,7 +77,7 @@ bool tsMonitorComp = false; // telem bool tsEnableTelem = true; -int32_t tsTelemInterval = 86400; +int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com"; uint16_t tsTelemPort = 80; @@ -164,6 +165,7 @@ int32_t tsMqRebalanceInterval = 2; int32_t tsTtlUnit = 86400; int32_t tsTtlPushInterval = 86400; int32_t tsGrantHBInterval = 60; +int32_t tsUptimeInterval = 300; // seconds #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { @@ -377,6 
+379,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16); if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1; + tsNumOfVnodeRsmaThreads = tsNumOfCores; + tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4); + if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1; + tsNumOfQnodeQueryThreads = tsNumOfCores * 2; tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1; @@ -538,6 +544,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32; tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; + tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32; tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32; @@ -782,6 +789,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; } else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) { tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; + } else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) { + tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; } else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) { tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; } else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) { diff --git a/source/dnode/mnode/impl/inc/mndCluster.h b/source/dnode/mnode/impl/inc/mndCluster.h index 0de253fb6a..2cb41edd7c 100644 --- a/source/dnode/mnode/impl/inc/mndCluster.h +++ b/source/dnode/mnode/impl/inc/mndCluster.h @@ -27,6 +27,7 @@ void mndCleanupCluster(SMnode *pMnode); int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len); int64_t mndGetClusterId(SMnode *pMnode); int64_t mndGetClusterCreateTime(SMnode *pMnode); +float mndGetClusterUpTime(SMnode *pMnode); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 8cff7fe48e..ea05215fe9 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -179,6 +179,7 @@ typedef struct { char name[TSDB_CLUSTER_ID_LEN]; int64_t createdTime; int64_t updateTime; + int32_t upTime; } SClusterObj; typedef struct { diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index a82bf739f5..7d633f90bd 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -19,7 +19,7 @@ #include "mndTrans.h" #define CLUSTER_VER_NUMBE 1 -#define CLUSTER_RESERVE_SIZE 64 +#define CLUSTER_RESERVE_SIZE 60 static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster); static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw); @@ -29,6 +29,7 @@ static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOldCluster, SCl static int32_t mndCreateDefaultCluster(SMnode *pMnode); static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter); 
+static int32_t mndProcessUptimeTimer(SRpcMsg *pReq); int32_t mndInitCluster(SMnode *pMnode) { SSdbTable table = { @@ -42,8 +43,10 @@ int32_t mndInitCluster(SMnode *pMnode) { .deleteFp = (SdbDeleteFp)mndClusterActionDelete, }; + mndSetMsgHandle(pMnode, TDMT_MND_UPTIME_TIMER, mndProcessUptimeTimer); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndRetrieveClusters); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndCancelGetNextCluster); + return sdbSetTable(pMnode->pSdb, table); } @@ -62,40 +65,69 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) { return 0; } -int64_t mndGetClusterId(SMnode *pMnode) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - int64_t clusterId = -1; +static SClusterObj *mndAcquireCluster(SMnode *pMnode) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; while (1) { SClusterObj *pCluster = NULL; pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster); if (pIter == NULL) break; + return pCluster; + } + + return NULL; +} + +static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) { + SSdb *pSdb = pMnode->pSdb; + sdbRelease(pSdb, pCluster); +} + +int64_t mndGetClusterId(SMnode *pMnode) { + int64_t clusterId = 0; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { clusterId = pCluster->id; - sdbRelease(pSdb, pCluster); + mndReleaseCluster(pMnode, pCluster); } return clusterId; } int64_t mndGetClusterCreateTime(SMnode *pMnode) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - int64_t createTime = INT64_MAX; - - while (1) { - SClusterObj *pCluster = NULL; - pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster); - if (pIter == NULL) break; - + int64_t createTime = 0; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { createTime = pCluster->createdTime; - sdbRelease(pSdb, pCluster); + mndReleaseCluster(pMnode, pCluster); } return createTime; } +static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) { +#if 0 + int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000; + upTime = upTime + pCluster->upTime; + return upTime; +#else + return pCluster->upTime; +#endif +} + +float mndGetClusterUpTime(SMnode *pMnode) { + int64_t upTime = 0; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { + upTime = mndGetClusterUpTimeImp(pCluster); + mndReleaseCluster(pMnode, pCluster); + } + + return upTime / 86400.0f; +} + static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -107,6 +139,7 @@ static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) { SDB_SET_INT64(pRaw, dataPos, pCluster->createdTime, _OVER) SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER) SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER) + SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER) SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER) terrno = 0; @@ -144,6 +177,7 @@ static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw) { SDB_GET_INT64(pRaw, dataPos, &pCluster->createdTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER) SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER) SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER) terrno = 0; @@ -162,6 +196,7 @@ _OVER: static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) { mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster); 
pSdb->pMnode->clusterId = pCluster->id; + pCluster->updateTime = taosGetTimestampMs(); return 0; } @@ -171,7 +206,10 @@ static int32_t mndClusterActionDelete(SSdb *pSdb, SClusterObj *pCluster) { } static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOld, SClusterObj *pNew) { - mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p", pOld->id, pOld, pNew); + mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld, + pNew, pOld->upTime, pNew->upTime); + pOld->upTime = pNew->upTime; + pOld->updateTime = taosGetTimestampMs(); return 0; } @@ -242,6 +280,10 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock * pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, buf, false); + int32_t upTime = mndGetClusterUpTimeImp(pCluster); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&upTime, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false); @@ -257,3 +299,40 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) { SSdb *pSdb = pMnode->pSdb; sdbCancelFetch(pSdb, pIter); } + +static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SClusterObj clusterObj = {0}; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { + memcpy(&clusterObj, pCluster, sizeof(SClusterObj)); + clusterObj.upTime += tsUptimeInterval; + mndReleaseCluster(pMnode, pCluster); + } + + if (clusterObj.id <= 0) { + mError("can't get cluster info while update uptime"); + return 0; + } + + mTrace("update cluster uptime to %" PRId64, clusterObj.upTime); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); + if (pTrans == NULL) return -1; + + SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +} diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index c3296ac5c1..65a539bc90 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -100,6 +100,16 @@ static void mndGrantHeartBeat(SMnode *pMnode) { } } +static void mndIncreaseUpTime(SMnode *pMnode) { + int32_t contLen = 0; + void *pReq = mndBuildTimerMsg(&contLen); + if (pReq != NULL) { + SRpcMsg rpcMsg = { + .msgType = TDMT_MND_UPTIME_TIMER, .pCont = pReq, .contLen = contLen, .info.ahandle = (void *)0x9528}; + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + } +} + static void *mndThreadFp(void *param) { SMnode *pMnode = param; int64_t lastTime = 0; @@ -122,13 +132,17 @@ static void *mndThreadFp(void *param) { mndCalMqRebalance(pMnode); } - if (lastTime % (tsTelemInterval * 10) == 0) { + if (lastTime % (tsTelemInterval * 10) == 1) { mndPullupTelem(pMnode); } if (lastTime % (tsGrantHBInterval * 10) == 0) { mndGrantHeartBeat(pMnode); } + + if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) { + mndIncreaseUpTime(pMnode); + } } 
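/*
 * Reviewer note (sketch, not part of the applied diff): the uptime bookkeeping
 * introduced above fits together as follows. The `* 10` factor in the timer
 * checks suggests mndThreadFp ticks roughly ten times per second, so
 * `(lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)` fires
 * once per tsUptimeInterval seconds (300 s by default), offset from the other
 * periodic checks. Each firing puts TDMT_MND_UPTIME_TIMER on the write queue;
 * mndProcessUptimeTimer adds tsUptimeInterval to SClusterObj.upTime and persists
 * it through a commit-log-only transaction (the new int32 is carved out of the
 * sdb reserve area, hence CLUSTER_RESERVE_SIZE dropping from 64 to 60).
 * mndGetClusterUpTime then reports the accumulated seconds as days, which is
 * what the monitor's master_uptime now consumes.
 */
#if 0 /* minimal sketch of the arithmetic only; not a new API */
static float clusterUptimeDays(int32_t upTimeSec) {
  /* upTimeSec grows by tsUptimeInterval (300) on every uptime-timer firing */
  return upTimeSec / 86400.0f; /* e.g. 43200 s -> 0.5 days */
}
#endif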
return NULL; @@ -556,7 +570,8 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { } if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || - pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) { + pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER || + pMsg->msgType == TDMT_MND_UPTIME_TIMER) { return -1; } @@ -705,7 +720,8 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr if (pObj->id == pMnode->selfDnodeId) { pClusterInfo->first_ep_dnode_id = pObj->id; tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep)); - pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f); + pClusterInfo->master_uptime = mndGetClusterUpTime(pMnode); + // pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f); tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role)); } else { tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role)); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index e0f2b83160..ebec3d5ea6 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -442,6 +442,8 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt if (req.rollup) { req.rsmaParam.maxdelay[0] = pStb->maxdelay[0]; req.rsmaParam.maxdelay[1] = pStb->maxdelay[1]; + req.rsmaParam.watermark[0] = pStb->watermark[0]; + req.rsmaParam.watermark[1] = pStb->watermark[1]; if (pStb->ast1Len > 0) { if (mndConvertRsmaTask(&req.rsmaParam.qmsg[0], &req.rsmaParam.qmsgLen[0], pStb->pAst1, pStb->uid, STREAM_TRIGGER_WINDOW_CLOSE, req.rsmaParam.watermark[0]) < 0) { diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 8e8cff853c..b7129cf56e 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -68,7 +68,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM if (pMgmt->errCode != 0) { mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode)); } else { - mInfo("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode)); + mDebug("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode)); } pMgmt->transId = 0; taosWUnLockLatch(&pMgmt->lock); @@ -118,7 +118,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM SSyncMgmt *pMgmt = &pMnode->syncMgmt; pMgmt->errCode = cbMeta.code; - mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId, + mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term); taosWLockLatch(&pMgmt->lock); @@ -126,7 +126,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM if (pMgmt->errCode != 0) { mError("trans:-1, failed to propose sync reconfig since %s, post sem", tstrerror(pMgmt->errCode)); } else { - mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem", + mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem", pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term); } pMgmt->transId = 0; @@ -228,7 +228,7 @@ int32_t mndInitSync(SMnode *pMnode) { syncInfo.isStandBy = 
pMgmt->standby; syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT; - mInfo("start to open mnode sync, standby:%d", pMgmt->standby); + mDebug("start to open mnode sync, standby:%d", pMgmt->standby); if (pMgmt->standby || pMgmt->replica.id > 0) { SSyncCfg *pCfg = &syncInfo.syncCfg; pCfg->replicaNum = 1; @@ -236,7 +236,7 @@ int32_t mndInitSync(SMnode *pMnode) { SNodeInfo *pNode = &pCfg->nodeInfo[0]; tstrncpy(pNode->nodeFqdn, pMgmt->replica.fqdn, sizeof(pNode->nodeFqdn)); pNode->nodePort = pMgmt->replica.port; - mInfo("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort); + mDebug("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort); } tsem_init(&pMgmt->syncSem, 0, 0); diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index 27814fe5be..93f7531a27 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ b/source/dnode/mnode/impl/src/mndTelem.c @@ -131,7 +131,9 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) { char* pCont = mndBuildTelemetryReport(pMnode); if (pCont != NULL) { if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) { - mError("failed to send telemetry msg"); + mError("failed to send telemetry report"); + } else { + mTrace("succeed to send telemetry report"); } taosMemoryFree(pCont); } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 17b4336465..c77a80cc82 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1308,7 +1308,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { if (pTrans->policy == TRN_POLICY_ROLLBACK) { if (pTrans->lastAction != 0) { STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->lastAction); - if (pAction->retryCode != 0 && pAction->retryCode != pAction->errCode) { + if (pAction->retryCode != 0 && pAction->retryCode == pAction->errCode) { if (pTrans->failedTimes < 6) { mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id, pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes); diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt index 3f9ec123a8..a55b45ca11 100644 --- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME smaTest - COMMAND smaTest -) +if(NOT ${TD_WINDOWS}) + add_test( + NAME smaTest + COMMAND smaTest + ) +endif(NOT ${TD_WINDOWS}) diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt index dcfbe658fc..e3a3fc2e79 100644 --- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME stbTest - COMMAND stbTest -) \ No newline at end of file +if(NOT ${TD_WINDOWS}) + add_test( + NAME stbTest + COMMAND stbTest + ) +endif(NOT ${TD_WINDOWS}) \ No newline at end of file diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index c43772062e..abfffc045f 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -32,7 +32,7 @@ extern "C" { #define smaTrace(...) 
do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0) // clang-format on -#define RSMA_TASK_INFO_HASH_SLOT 8 +#define RSMA_TASK_INFO_HASH_SLOT (8) typedef struct SSmaEnv SSmaEnv; typedef struct SSmaStat SSmaStat; @@ -48,9 +48,12 @@ typedef struct SQTaskFWriter SQTaskFWriter; struct SSmaEnv { SRWLatch lock; int8_t type; + int8_t flag; // 0x01 inClose SSmaStat *pStat; }; +#define SMA_ENV_FLG_CLOSE ((int8_t)0x1) + typedef struct { int8_t inited; int32_t rsetId; @@ -90,14 +93,13 @@ struct SRSmaStat { SSma *pSma; int64_t commitAppliedVer; // vnode applied version for async commit int64_t refId; // shared by fetch tasks - volatile int64_t qBufSize; // queue buffer size + volatile int64_t nBufItems; // number of items in queue buffer SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo) int8_t triggerStat; // shared by fetch tasks int8_t commitStat; // 0 not in committing, 1 in committing - int8_t execStat; // 0 not in exec , 1 in exec SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w) SHashObj *infoHash; // key: suid, value: SRSmaInfo - SHashObj *fetchHash; // key: suid, value: L1 or L2 or L1|L2 + tsem_t notEmpty; // has items in queue buffer }; struct SSmaStat { @@ -106,31 +108,34 @@ struct SSmaStat { SRSmaStat rsmaStat; // rollup sma }; T_REF_DECLARE() + char data[]; }; #define SMA_STAT_TSMA(s) (&(s)->tsmaStat) #define SMA_STAT_RSMA(s) (&(s)->rsmaStat) #define RSMA_INFO_HASH(r) ((r)->infoHash) -#define RSMA_FETCH_HASH(r) ((r)->fetchHash) #define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat) #define RSMA_COMMIT_STAT(r) (&(r)->commitStat) #define RSMA_REF_ID(r) ((r)->refId) #define RSMA_FS_LOCK(r) (&(r)->lock) struct SRSmaInfoItem { - int8_t level; + int8_t level : 4; + int8_t fetchLevel : 4; int8_t triggerStat; - uint16_t interval; // second - int32_t maxDelay; + uint16_t nSkipped; + int32_t maxDelay; // ms tmr_h tmrId; }; struct SRSmaInfo { STSchema *pTSchema; int64_t suid; - int64_t refId; // refId of SRSmaStat - uint64_t delFlag : 1; - uint64_t lastReceived : 63; // second + int64_t refId; // refId of SRSmaStat + int64_t lastRecv; // ms + int8_t assigned; // 0 idle, 1 assgined for exec + int8_t delFlag; + int16_t padding; T_REF_DECLARE() SRSmaInfoItem items[TSDB_RETENTION_L2]; void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 8bc82928ed..0b51b61c3a 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -198,8 +198,6 @@ int32_t smaAsyncPreCommit(SSma* pSma); int32_t smaAsyncCommit(SSma* pSma); int32_t smaAsyncPostCommit(SSma* pSma); int32_t smaDoRetention(SSma* pSma, int64_t now); -int32_t smaProcessFetch(SSma* pSma, void* pMsg); -int32_t smaProcessExec(SSma* pSma, void* pMsg); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index 8b92475035..ca5367f397 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -109,7 +109,7 @@ int32_t smaBegin(SSma *pSma) { /** * @brief pre-commit for rollup sma(sync commit). * 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED. 
- * 2) wait all triggered fetch tasks finished + * 2) wait for all triggered fetch tasks to finish * 3) perform persist task for qTaskInfo * * @param pSma @@ -127,14 +127,14 @@ static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) { // step 1: set rsma stat paused atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED); - // step 2: wait all triggered fetch tasks finished + // step 2: wait for all triggered fetch tasks to finish int32_t nLoops = 0; while (1) { if (T_REF_VAL_GET(pStat) == 0) { - smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma)); break; } else { - smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -316,15 +316,17 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { // step 1: set rsma stat atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED); atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1); + pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied; + ASSERT(pRSmaStat->commitAppliedVer > 0); - // step 2: wait all triggered fetch tasks finished + // step 2: wait for all triggered fetch tasks to finish int32_t nLoops = 0; while (1) { if (T_REF_VAL_GET(pStat) == 0) { - smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma)); break; } else { - smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma commit, fetch tasks are not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -338,30 +340,29 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { * 1) This is high cost task and should not put in asyncPreCommit originally. * 2) But, if put in asyncCommit, would trigger taskInfo cloning frequently. 
*/ - nLoops = 0; - smaInfo("vgId:%d, start to wait for rsma qtask free, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - - int8_t old; - while (1) { - old = atomic_val_compare_exchange_8(&pRSmaStat->execStat, 0, 1); - if (old == 0) break; - if (++nLoops > 1000) { - sched_yield(); - nLoops = 0; - smaDebug("vgId:%d, wait for rsma qtask free, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - } - } - - smaInfo("vgId:%d, end to wait for rsma qtask free, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_COMMIT) < 0) { - atomic_store_8(&pRSmaStat->execStat, 0); return TSDB_CODE_FAILED; } + smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId()); + nLoops = 0; + while (atomic_load_64(&pRSmaStat->nBufItems) > 0) { + ++nLoops; + if (nLoops > 1000) { + sched_yield(); + nLoops = 0; + } + } + smaInfo("vgId:%d, rsma commit, all items are consumed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); + if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) { + return TSDB_CODE_FAILED; + } + smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); + +#if 0 // consuming task of qTaskInfo clone // step 4: swap queue/qall and iQueue/iQall // lock - taosWLockLatch(SMA_ENV_LOCK(pEnv)); + // taosWLockLatch(SMA_ENV_LOCK(pEnv)); ASSERT(RSMA_INFO_HASH(pRSmaStat)); @@ -376,13 +377,9 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter); } - atomic_store_64(&pRSmaStat->qBufSize, 0); - atomic_store_8(&pRSmaStat->execStat, 0); // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - - // step 5: others - pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied; + // taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); +#endif return TSDB_CODE_SUCCESS; } @@ -398,13 +395,14 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) { if (!pSmaEnv) { return TSDB_CODE_SUCCESS; } - +#if 0 SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv); // perform persist task for qTaskInfo operator if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) { return TSDB_CODE_FAILED; } +#endif return TSDB_CODE_SUCCESS; } @@ -426,10 +424,10 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { // step 1: merge qTaskInfo and iQTaskInfo // lock - taosWLockLatch(SMA_ENV_LOCK(pEnv)); + // taosWLockLatch(SMA_ENV_LOCK(pEnv)); - void *pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL); - while (pIter) { + void *pIter = NULL; + while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) { tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter; if (RSMA_INFO_IS_DEL(pRSmaInfo)) { @@ -447,14 +445,13 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { SMA_VID(pSma), refVal, *pSuid); } - pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter); continue; } - +#if 0 if (pRSmaInfo->taskInfo[0]) { if (pRSmaInfo->iTaskInfo[0]) { SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pRSmaInfo->iTaskInfo[0]; - tdFreeRSmaInfo(pSma, pRSmaInfo, true); + tdFreeRSmaInfo(pSma, pRSmaInfo, false); pRSmaInfo->iTaskInfo[0] = NULL; } } else { @@ -463,8 +460,7 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter)); smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid); - - 
pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter); +#endif } for (int32_t i = 0; i < taosArrayGetSize(rsmaDeleted); ++i) { @@ -480,10 +476,9 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t)); } taosArrayDestroy(rsmaDeleted); - // TODO: remove suid in files? // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); // step 2: cleanup outdated qtaskinfo files tdCleanupQTaskInfoFiles(pSma, pRSmaStat); diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index f51aad22bd..32a419022a 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -23,11 +23,13 @@ extern SSmaMgmt smaMgmt; // declaration of static functions -static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma); -static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path); -static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv); -static void *tdFreeTSmaStat(STSmaStat *pStat); -static void tdDestroyRSmaStat(void *pRSmaStat); +static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv); +static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv); +static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma); +static int32_t tdRsmaStartExecutor(const SSma *pSma); +static int32_t tdRsmaStopExecutor(const SSma *pSma); +static void *tdFreeTSmaStat(STSmaStat *pStat); +static void tdDestroyRSmaStat(void *pRSmaStat); /** * @brief rsma init @@ -97,35 +99,42 @@ void smaCleanUp() { } } -static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path) { +static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) { SSmaEnv *pEnv = NULL; pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv)); + *ppEnv = pEnv; if (!pEnv) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; + return TSDB_CODE_FAILED; } SMA_ENV_TYPE(pEnv) = smaType; taosInitRWLatch(&(pEnv->lock)); + (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), *ppEnv) + : atomic_store_ptr(&SMA_RSMA_ENV(pSma), *ppEnv); + if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) { tdFreeSmaEnv(pEnv); - return NULL; + *ppEnv = NULL; + (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), NULL) + : atomic_store_ptr(&SMA_RSMA_ENV(pSma), NULL); + return TSDB_CODE_FAILED; } - return pEnv; + return TSDB_CODE_SUCCESS; } -static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv) { - if (!pEnv) { +static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) { + if (!ppEnv) { terrno = TSDB_CODE_INVALID_PTR; return TSDB_CODE_FAILED; } - if (!(*pEnv)) { - if (!(*pEnv = tdNewSmaEnv(pSma, smaType, path))) { + if (!(*ppEnv)) { + if (tdNewSmaEnv(pSma, smaType, ppEnv) != TSDB_CODE_SUCCESS) { return TSDB_CODE_FAILED; } } @@ -199,7 +208,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS * tdInitSmaStat invoked in other multithread environment later. 
*/ if (!(*pSmaStat)) { - *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat)); + *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat) + sizeof(TdThread) * tsNumOfVnodeRsmaThreads); if (!(*pSmaStat)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_FAILED; @@ -209,6 +218,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS SRSmaStat *pRSmaStat = (SRSmaStat *)(*pSmaStat); pRSmaStat->pSma = (SSma *)pSma; atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_INIT); + tsem_init(&pRSmaStat->notEmpty, 0, 0); // init smaMgmt smaInit(); @@ -231,9 +241,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS return TSDB_CODE_FAILED; } - RSMA_FETCH_HASH(pRSmaStat) = taosHashInit( - RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); - if (!RSMA_FETCH_HASH(pRSmaStat)) { + if (tdRsmaStartExecutor(pSma) < 0) { return TSDB_CODE_FAILED; } } else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) { @@ -267,6 +275,7 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { smaDebug("vgId:%d, destroy rsma stat %p", SMA_VID(pSma), pRSmaStat); // step 1: set rsma trigger stat cancelled atomic_store_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_CANCELLED); + tsem_destroy(&(pStat->notEmpty)); // step 2: destroy the rsma info and associated fetch tasks if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) { @@ -279,17 +288,14 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { } taosHashCleanup(RSMA_INFO_HASH(pStat)); - // step 3: destroy the rsma fetch hash - taosHashCleanup(RSMA_FETCH_HASH(pStat)); - - // step 4: wait all triggered fetch tasks finished + // step 3: wait for all triggered fetch tasks to finish int32_t nLoops = 0; while (1) { if (T_REF_VAL_GET((SSmaStat *)pStat) == 0) { - smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma)); break; } else { - smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -298,6 +304,9 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { } } + // step 4: + tdRsmaStopExecutor(pSma); + // step 5: free pStat taosMemoryFreeClear(pStat); } @@ -388,17 +397,70 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) { pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&SMA_TSMA_ENV(pSma)) : atomic_load_ptr(&SMA_RSMA_ENV(pSma)); if (!pEnv) { - char rname[TSDB_FILENAME_LEN] = {0}; - - if (tdInitSmaEnv(pSma, smaType, rname, &pEnv) < 0) { + if (tdInitSmaEnv(pSma, smaType, &pEnv) < 0) { tdUnLockSma(pSma); return TSDB_CODE_FAILED; } - - (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? 
atomic_store_ptr(&SMA_TSMA_ENV(pSma), pEnv) - : atomic_store_ptr(&SMA_RSMA_ENV(pSma), pEnv); } tdUnLockSma(pSma); return TSDB_CODE_SUCCESS; }; + +void *tdRSmaExecutorFunc(void *param) { + setThreadName("vnode-rsma"); + + tdRSmaProcessExecImpl((SSma *)param, RSMA_EXEC_OVERFLOW); + return NULL; +} + +static int32_t tdRsmaStartExecutor(const SSma *pSma) { + TdThreadAttr thAttr = {0}; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); + TdThread *pthread = (TdThread *)&pStat->data; + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + if (taosThreadCreate(&pthread[i], &thAttr, tdRSmaExecutorFunc, (void *)pSma) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + smaError("vgId:%d, failed to create pthread for rsma since %s", SMA_VID(pSma), terrstr()); + return -1; + } + smaDebug("vgId:%d, success to create pthread for rsma", SMA_VID(pSma)); + } + + taosThreadAttrDestroy(&thAttr); + return 0; +} + +static int32_t tdRsmaStopExecutor(const SSma *pSma) { + if (pSma && VND_IS_RSMA(pSma->pVnode)) { + SSmaEnv *pEnv = NULL; + SSmaStat *pStat = NULL; + SRSmaStat *pRSmaStat = NULL; + TdThread *pthread = NULL; + + if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = SMA_ENV_STAT(pEnv))) { + return 0; + } + + pEnv->flag |= SMA_ENV_FLG_CLOSE; + pRSmaStat = (SRSmaStat *)pStat; + pthread = (TdThread *)&pStat->data; + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + tsem_post(&(pRSmaStat->notEmpty)); + } + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + if (taosCheckPthreadValid(pthread[i])) { + smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64, SMA_VID(pSma), pthread[i]); + taosThreadJoin(pthread[i], NULL); + } + } + } + return 0; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 9b3b0cb63d..426ab521fd 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -15,10 +15,12 @@ #include "sma.h" -#define RSMA_QTASKINFO_BUFSIZE (32768) -#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid -#define RSMA_QTASKEXEC_BUFSIZE (1048576) -#define RSMA_SUBMIT_BATCH_SIZE (1024) +#define RSMA_QTASKINFO_BUFSIZE (32768) // size +#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid +#define RSMA_QTASKEXEC_SMOOTH_SIZE (100) // cnt +#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt +#define RSMA_FETCH_DELAY_MAX (900000) // ms +#define RSMA_FETCH_ACTIVE_MAX (1800) // ms SSmaMgmt smaMgmt = { .inited = 0, @@ -40,11 +42,10 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSiz static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid); static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo); static void tdFreeRSmaSubmitItems(SArray *pItems); -static int32_t tdRSmaConsumeAndFetch(SSma *pSma, int64_t suid, int8_t level, SArray *pSubmitArr); -static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, - int64_t suid); +static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr); +static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid); static void tdRSmaFetchTrigger(void *param, void *tmrId); -static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level); 
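The executor code added to smaEnv.c above starts tsNumOfVnodeRsmaThreads long-lived worker threads that block on the notEmpty semaphore and exit once the env's close flag is set and the buffered items are drained; shutdown posts the semaphore once per thread so none stays blocked, then joins them. Below is a minimal standalone sketch of that wake-up/shutdown protocol, assuming only POSIX threads/semaphores and C11 atomics in place of the taos wrappers. Every identifier in the sketch (workerFn, closing, N_WORKERS, the nBufItems counter used here) is illustrative and not a TDengine API; the compare-and-swap drain merely mirrors the atomic compare-exchange idiom the patch uses elsewhere.

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_WORKERS 2

static sem_t       notEmpty;   /* posted once per enqueued item, and once per worker on close */
static atomic_long nBufItems;  /* number of queued items                                       */
static atomic_int  closing;    /* set by the shutdown path                                     */

static void *workerFn(void *arg) {
  (void)arg;
  for (;;) {
    long cur;
    while ((cur = atomic_load(&nBufItems)) > 0) {             /* drain whatever is queued */
      if (atomic_compare_exchange_weak(&nBufItems, &cur, cur - 1)) {
        /* ... process one item ... */
      }
    }
    if (atomic_load(&closing)) break;   /* close flag is only honored once the queue is empty */
    sem_wait(&notEmpty);                /* sleep until a producer or the closer posts          */
  }
  return NULL;
}

int main(void) {
  pthread_t tid[N_WORKERS];
  sem_init(&notEmpty, 0, 0);
  for (int i = 0; i < N_WORKERS; ++i) pthread_create(&tid[i], NULL, workerFn, NULL);

  /* producer side: enqueue an item, then wake a sleeping worker */
  atomic_fetch_add(&nBufItems, 1);
  sem_post(&notEmpty);

  /* shutdown: set the flag, post once per worker, then join */
  atomic_store(&closing, 1);
  for (int i = 0; i < N_WORKERS; ++i) sem_post(&notEmpty);
  for (int i = 0; i < N_WORKERS; ++i) pthread_join(tid[i], NULL);
  sem_destroy(&notEmpty);
  printf("done\n");
  return 0;
}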
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter); @@ -620,7 +621,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { */ int32_t smaDoRetention(SSma *pSma, int64_t now) { int32_t code = TSDB_CODE_SUCCESS; - if (VND_IS_RSMA(pSma->pVnode)) { + if (!VND_IS_RSMA(pSma->pVnode)) { return code; } @@ -635,8 +636,8 @@ _end: return code; } -static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, - int64_t suid) { +static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid) { SArray *pResList = taosArrayInit(1, POINTER_BYTES); if (pResList == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -729,22 +730,26 @@ static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inpu taosWriteQitem(pInfo->queue, qItem); + pInfo->lastRecv = taosGetTimestampMs(); + SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); - int64_t bufSize = atomic_add_fetch_64(&pRSmaStat->qBufSize, pReq->header.contLen); + + int64_t nItems = atomic_fetch_add_64(&pRSmaStat->nBufItems, 1); + + if (atomic_load_8(&pInfo->assigned) == 0) { + tsem_post(&(pRSmaStat->notEmpty)); + } // smoothing consume - int32_t n = bufSize / RSMA_QTASKEXEC_BUFSIZE; + int32_t n = nItems / RSMA_QTASKEXEC_SMOOTH_SIZE; if (n > 1) { if (n > 10) { n = 10; } - taosMsleep(n << 4); - if (n > 2) { + taosMsleep(n << 3); + if (n > 5) { smaWarn("vgId:%d, pInfo->queue itemSize:%d, memSize:%" PRIi64 ", sleep %d ms", SMA_VID(pSma), - taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 4); - } else { - smaDebug("vgId:%d, pInfo->queue itemSize:%d, memSize:%" PRIi64 ", sleep %d ms", SMA_VID(pSma), - taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 4); + taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 3); } } @@ -812,7 +817,7 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, } SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx); - tdRSmaFetchAndSubmitResult(pSma, qTaskInfo, pItem, pInfo->pTSchema, pInfo->suid); + tdRSmaExecAndSubmitResult(pSma, qTaskInfo, pItem, pInfo->pTSchema, pInfo->suid); return TSDB_CODE_SUCCESS; } @@ -840,25 +845,25 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { return NULL; } - taosRLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRLockLatch(SMA_ENV_LOCK(pEnv)); pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { if (RSMA_INFO_IS_DEL(pRSmaInfo)) { - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return NULL; } if (!pRSmaInfo->taskInfo[0]) { if (tdCloneRSmaInfo(pSma, pRSmaInfo) < 0) { - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return NULL; } } tdRefRSmaInfo(pSma, pRSmaInfo); - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); ASSERT(pRSmaInfo->suid == suid); return pRSmaInfo; } - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return NULL; } @@ -908,58 +913,13 @@ static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputTyp return TSDB_CODE_SUCCESS; } -static int32_t tdRSmaExecCheck(SSma *pSma) { - SRSmaStat *pRSmaStat = 
SMA_RSMA_STAT(pSma); - int64_t bufSize = atomic_load_64(&pRSmaStat->qBufSize); - - if (bufSize < RSMA_QTASKEXEC_BUFSIZE) { - smaDebug("vgId:%d, bufSize is %d but has no chance to exec as less than %d", SMA_VID(pSma), bufSize, - RSMA_QTASKEXEC_BUFSIZE); - return TSDB_CODE_SUCCESS; - } - - if (atomic_val_compare_exchange_8(&pRSmaStat->execStat, 0, 1) == 1) { - smaDebug("vgId:%d, bufSize is %d but has no chance to exec as qTaskInfo occupied by another task", SMA_VID(pSma), - bufSize); - return TSDB_CODE_SUCCESS; - } - - smaDebug("vgId:%d, bufSize is %d and has chance to exec as qTaskInfo is free now", SMA_VID(pSma), bufSize); - - SRSmaExecMsg fetchMsg; - int32_t contLen = sizeof(SMsgHead); - void *pBuf = rpcMallocCont(0 + contLen); - - ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma); - ((SMsgHead *)pBuf)->contLen = sizeof(SMsgHead); - - SRpcMsg rpcMsg = { - .code = 0, - .msgType = TDMT_VND_EXEC_RSMA, - .pCont = pBuf, - .contLen = contLen, - }; - - if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) { - smaError("vgId:%d, failed to put rsma exec msg into query-queue since %s", SMA_VID(pSma), terrstr()); - goto _err; - } - - smaDebug("vgId:%d, success to put rsma fetch msg into query-queue", SMA_VID(pSma)); - - return TSDB_CODE_SUCCESS; -_err: - atomic_store_8(&pRSmaStat->execStat, 0); - return TSDB_CODE_FAILED; -} - int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); if (!pEnv) { // only applicable when rsma env exists return TSDB_CODE_SUCCESS; } - + STbUidStore uidStore = {0}; SRetention *pRetention = SMA_RETENTION(pSma); if (!RETENTION_VALID(pRetention + 1)) { // return directly if retention level 1 is invalid @@ -967,25 +927,30 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { } if (inputType == STREAM_INPUT__DATA_SUBMIT) { - STbUidStore uidStore = {0}; - tdFetchSubmitReqSuids(pMsg, &uidStore); + if (tdFetchSubmitReqSuids(pMsg, &uidStore) < 0) { + goto _err; + } if (uidStore.suid != 0) { - tdExecuteRSmaAsync(pSma, pMsg, inputType, uidStore.suid); - - void *pIter = taosHashIterate(uidStore.uidHash, NULL); - while (pIter) { - tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - tdExecuteRSmaAsync(pSma, pMsg, inputType, *pTbSuid); - pIter = taosHashIterate(uidStore.uidHash, pIter); + if (tdExecuteRSmaAsync(pSma, pMsg, inputType, uidStore.suid) < 0) { + goto _err; } - tdUidStoreDestory(&uidStore); - - tdRSmaExecCheck(pSma); + void *pIter = NULL; + while ((pIter = taosHashIterate(uidStore.uidHash, pIter))) { + tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); + if (tdExecuteRSmaAsync(pSma, pMsg, inputType, *pTbSuid) < 0) { + goto _err; + } + } } } + tdUidStoreDestory(&uidStore); return TSDB_CODE_SUCCESS; +_err: + tdUidStoreDestory(&uidStore); + smaError("vgId:%d, failed to process rsma submit since: %s", SMA_VID(pSma), terrstr()); + return TSDB_CODE_FAILED; } /** @@ -1416,7 +1381,10 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) { } for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { +#if 0 qTaskInfo_t taskInfo = RSMA_INFO_IQTASK(pRSmaInfo, i); +#endif + qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pRSmaInfo, i); if (!taskInfo) { smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1); continue; @@ -1553,7 +1521,18 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active", SMA_VID(pSma), pItem->level, 
pRSmaInfo->suid); // async process - tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level); + pItem->fetchLevel = pItem->level; +#if 0 + SRSmaInfo *qInfo = tdAcquireRSmaInfoBySuid(pSma, pRSmaInfo->suid); + SRSmaInfoItem *qItem = RSMA_INFO_ITEM(qInfo, pItem->level - 1); + ASSERT(qItem->level == pItem->level); + ASSERT(qItem->fetchLevel == pItem->fetchLevel); +#endif + if (atomic_load_8(&pRSmaInfo->assigned) == 0) { + tsem_post(&(pStat->notEmpty)); + } + smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level, + pRSmaInfo->suid); } break; case TASK_TRIGGER_STAT_PAUSED: { smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused", @@ -1568,8 +1547,8 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { SMA_VID(pSma), pItem->level, pRSmaInfo->suid); } break; default: { - smaWarn("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown", - SMA_VID(pSma), pItem->level, pRSmaInfo->suid); + smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown", + SMA_VID(pSma), pItem->level, pRSmaInfo->suid); } break; } @@ -1578,183 +1557,64 @@ _end: tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); } -/** - * @brief put rsma fetch msg to fetch queue - * - * @param pSma - * @param pInfo - * @param level - * @return int32_t - */ -static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) { - SRSmaFetchMsg fetchMsg = {.suid = pInfo->suid, .level = level}; - int32_t ret = 0; - int32_t contLen = 0; - SEncoder encoder = {0}; - tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret); - if (ret < 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tEncoderClear(&encoder); - goto _err; - } - - void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead)); - tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen); - if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tEncoderClear(&encoder); - } - tEncoderClear(&encoder); - - ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma); - ((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead); - - SRpcMsg rpcMsg = { - .code = 0, - .msgType = TDMT_VND_FETCH_RSMA, - .pCont = pBuf, - .contLen = contLen + sizeof(SMsgHead), - }; - - if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) { - smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s", - SMA_VID(pSma), pInfo->suid, level, terrstr()); - goto _err; - } - - smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), - pInfo->suid, level); - - return TSDB_CODE_SUCCESS; -_err: - return TSDB_CODE_FAILED; -} - -/** - * @brief fetch rsma data of level 2/3 and submit - * - * @param pSma - * @param pMsg - * @return int32_t - */ -int32_t smaProcessFetch(SSma *pSma, void *pMsg) { - SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg; - SRSmaFetchMsg req = {0}; - SDecoder decoder = {0}; - void *pBuf = NULL; - SRSmaStat *pRSmaStat = NULL; - if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) { - terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP; - goto _err; - } - - pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead)); - - tDecoderInit(&decoder, pBuf, pRpcMsg->contLen); - if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) { - terrno = TSDB_CODE_INVALID_MSG; - goto _err; - } - - pRSmaStat = SMA_RSMA_STAT(pSma); - - if (atomic_val_compare_exchange_8(&pRSmaStat->execStat, 0, 1) == 0) { - SArray 
*pSubmitArr = NULL; - if (!(pSubmitArr = taosArrayInit(RSMA_SUBMIT_BATCH_SIZE, POINTER_BYTES))) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - atomic_store_8(&pRSmaStat->execStat, 0); - goto _err; - } - tdRSmaConsumeAndFetch(pSma, req.suid, req.level, pSubmitArr); - atomic_store_8(&pRSmaStat->execStat, 0); - taosArrayDestroy(pSubmitArr); - } else { - int8_t level = req.level; - int8_t *val = taosHashGet(RSMA_FETCH_HASH(pRSmaStat), &req.suid, sizeof(req.suid)); - if (val) { - level |= (*val); - } - ASSERT(level >= 1 && level <= 3); - taosHashPut(RSMA_FETCH_HASH(pRSmaStat), &req.suid, sizeof(req.suid), &level, sizeof(level)); - } - - tDecoderClear(&decoder); - smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid, - req.level); - return TSDB_CODE_SUCCESS; -_err: - tDecoderClear(&decoder); - smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr()); - return TSDB_CODE_FAILED; -} - static void tdFreeRSmaSubmitItems(SArray *pItems) { + ASSERT(taosArrayGetSize(pItems) > 0); for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) { taosFreeQitem(*(void **)taosArrayGet(pItems, i)); } + taosArrayClear(pItems); } -static int32_t tdRSmaConsumeAndFetch(SSma *pSma, int64_t suid, int8_t level, SArray *pSubmitArr) { - SRSmaInfo *pInfo = tdAcquireRSmaInfoBySuid(pSma, suid); - if (!pInfo) { - return TSDB_CODE_SUCCESS; - } - - // step 1: consume submit req - int64_t qMemSize = 0; - if ((qMemSize = taosQueueMemorySize(pInfo->queue) > 0)) { - taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock - - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); - atomic_fetch_sub_64(&pRSmaStat->qBufSize, qMemSize); - - taosArrayClear(pSubmitArr); - - while (1) { - void *msg = NULL; - taosGetQitem(pInfo->qall, (void **)&msg); - if (msg) { - if (taosArrayPush(pSubmitArr, &msg) < 0) { - tdFreeRSmaSubmitItems(pSubmitArr); - goto _err; - } - } else { - break; - } - } - - int32_t size = taosArrayGetSize(pSubmitArr); - if (size > 0) { - for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) { - if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, RSMA_EXEC_TIMEOUT, i) < - 0) { - tdFreeRSmaSubmitItems(pSubmitArr); - goto _err; - } - } - - tdFreeRSmaSubmitItems(pSubmitArr); - } - } - - // step 2: fetch rsma result +/** + * @brief fetch rsma result(consider the efficiency and functionality) + * + * @param pSma + * @param pInfo + * @param pSubmitArr + * @return int32_t + */ +static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr) { SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; for (int8_t i = 1; i <= TSDB_RETENTION_L2; ++i) { - if (level & i) { + SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1); + if (pItem->fetchLevel) { + pItem->fetchLevel = 0; qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, i - 1); if (!taskInfo) { continue; } + + int64_t curMs = taosGetTimestampMs(); + if ((pItem->nSkipped * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) { + smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch executed", + SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay); + } else if (((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX)) { + ++pItem->nSkipped; + smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ", + SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv); + continue; + } else { + smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 
", fetch executed ", + SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv); + } + + pItem->nSkipped = 0; + if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) { goto _err; } - SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1); - if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, suid) < 0) { + if (tdRSmaExecAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid) < 0) { tdCleanupStreamInputDataBlock(taskInfo); goto _err; } tdCleanupStreamInputDataBlock(taskInfo); + smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch finished", + SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay); + } else { + smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 + " maxDelay:%d, fetch not executed as fetch level is %" PRIi8, + SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay, pItem->fetchLevel); } } @@ -1766,6 +1626,45 @@ _err: return TSDB_CODE_FAILED; } +static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SArray *pSubmitArr, ERsmaExecType type) { + taosArrayClear(pSubmitArr); + while (1) { + void *msg = NULL; + taosGetQitem(qall, (void **)&msg); + if (msg) { + if (taosArrayPush(pSubmitArr, &msg) < 0) { + tdFreeRSmaSubmitItems(pSubmitArr); + goto _err; + } + } else { + break; + } + } + + int32_t size = taosArrayGetSize(pSubmitArr); + if (size > 0) { + for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) { + if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, type, i) < 0) { + tdFreeRSmaSubmitItems(pSubmitArr); + goto _err; + } + } + tdFreeRSmaSubmitItems(pSubmitArr); + } + return TSDB_CODE_SUCCESS; +_err: + while (1) { + void *msg = NULL; + taosGetQitem(qall, (void **)&msg); + if (msg) { + taosFreeQitem(msg); + } else { + break; + } + } + return TSDB_CODE_FAILED; +} + /** * @brief * @@ -1773,11 +1672,12 @@ _err: * @param type * @return int32_t */ + int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { + SVnode *pVnode = pSma->pVnode; SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); SHashObj *infoHash = NULL; - SArray *pSubmitQArr = NULL; SArray *pSubmitArr = NULL; bool isFetchAll = false; @@ -1786,135 +1686,111 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { goto _err; } - if (type == RSMA_EXEC_OVERFLOW) { - taosRLockLatch(SMA_ENV_LOCK(pEnv)); - if (atomic_load_64(&pRSmaStat->qBufSize) < RSMA_QTASKEXEC_BUFSIZE) { - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); - return TSDB_CODE_SUCCESS; - } - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); - } - - if (!(pSubmitQArr = taosArrayInit(taosHashGetSize(infoHash), sizeof(SRSmaExecQItem)))) { + if (!(pSubmitArr = + taosArrayInit(TMIN(RSMA_SUBMIT_BATCH_SIZE, atomic_load_64(&pRSmaStat->nBufItems)), POINTER_BYTES))) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } - if (!(pSubmitArr = taosArrayInit(RSMA_SUBMIT_BATCH_SIZE, POINTER_BYTES))) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } + while (true) { + // step 1: rsma exec - consume data in buffer queue for all suids + if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) { + void *pIter = NULL; + while ((pIter = taosHashIterate(infoHash, pIter))) { + SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; + if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { + if ((taosQueueItemSize(pInfo->queue) > 0) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || + RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { + int32_t batchCnt = -1; + int32_t batchMax = 
taosHashGetSize(infoHash) / tsNumOfVnodeRsmaThreads; + bool occupied = (batchMax <= 1); + if (batchMax > 1) { + batchMax = 100 / batchMax; + } + while (occupied || (++batchCnt < batchMax)) { // greedy mode + taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock + int32_t qallItemSize = taosQallItemSize(pInfo->qall); + if (qallItemSize > 0) { + tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); + smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type); + } - // step 1: rsma exec - consume data in buffer queue for all suids - SRSmaExecQItem qItem = {0}; - void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock - if (type == RSMA_EXEC_OVERFLOW) { - while (pIter) { - SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; - if (taosQueueItemSize(pInfo->queue)) { - taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock - qItem.qall = &pInfo->qall; - qItem.pRSmaInfo = pIter; - taosArrayPush(pSubmitQArr, &qItem); - } - ASSERT(taosQueueItemSize(pInfo->queue) == 0); - pIter = taosHashIterate(infoHash, pIter); - } - } else if (type == RSMA_EXEC_COMMIT) { - while (pIter) { - SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; - if (taosQueueItemSize(pInfo->iQueue)) { - taosReadAllQitems(pInfo->iQueue, pInfo->iQall); - qItem.qall = &pInfo->iQall; - qItem.pRSmaInfo = pIter; - taosArrayPush(pSubmitQArr, &qItem); - } - ASSERT(taosQueueItemSize(pInfo->iQueue) == 0); - pIter = taosHashIterate(infoHash, pIter); - } - } else { - ASSERT(0); - } - atomic_store_64(&pRSmaStat->qBufSize, 0); + if (type == RSMA_EXEC_OVERFLOW) { + tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); + } - int32_t qSize = taosArrayGetSize(pSubmitQArr); - for (int32_t i = 0; i < qSize; ++i) { - SRSmaExecQItem *pItem = taosArrayGet(pSubmitQArr, i); - while (1) { - void *msg = NULL; - taosGetQitem(*(STaosQall **)pItem->qall, (void **)&msg); - if (msg) { - if (taosArrayPush(pSubmitArr, &msg) < 0) { - tdFreeRSmaSubmitItems(pSubmitArr); - goto _err; + if (qallItemSize > 0) { + atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); + continue; + } else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { + continue; + } + + break; + } + } + ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); } - } else { + } + if (type == RSMA_EXEC_COMMIT) { + if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { + break; + } else { + // commit should wait for all items be consumed + continue; + } + } + } +#if 0 + else if (type == RSMA_EXEC_COMMIT) { + while (pIter) { + SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; + if (taosQueueItemSize(pInfo->iQueue)) { + if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { + taosReadAllQitems(pInfo->iQueue, pInfo->iQall); // queue has mutex lock + int32_t qallItemSize = taosQallItemSize(pInfo->iQall); + if (qallItemSize > 0) { + atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); + nIdle = 0; + + // batch exec + tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); + } + + // tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); + ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0)); + } + } + ASSERT(taosQueueItemSize(pInfo->iQueue) == 0); + pIter = taosHashIterate(infoHash, pIter); + } + break; + } +#endif + else { + ASSERT(0); + } + + if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { + if (pEnv->flag & SMA_ENV_FLG_CLOSE) { + break; + } + + tsem_wait(&pRSmaStat->notEmpty); + + if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) { + smaInfo("vgId:%d, exec 
task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag, + atomic_load_64(&pRSmaStat->nBufItems)); break; } } - int32_t size = taosArrayGetSize(pSubmitArr); - if (size > 0) { - SRSmaInfo *pInfo = *(SRSmaInfo **)pItem->pRSmaInfo; - for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) { - if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, type, i) < 0) { - tdFreeRSmaSubmitItems(pSubmitArr); - goto _err; - } - } - tdFreeRSmaSubmitItems(pSubmitArr); - taosArrayClear(pSubmitArr); - } - } - - // step 2: rsma fetch - consume data in buffer queue for suids triggered by timer - if (taosHashGetSize(RSMA_FETCH_HASH(pRSmaStat)) <= 0) { - goto _end; - } - pIter = taosHashIterate(RSMA_FETCH_HASH(pRSmaStat), NULL); - if (pIter) { - tdRSmaConsumeAndFetch(pSma, *(int64_t *)taosHashGetKey(pIter, NULL), *(int8_t *)pIter, pSubmitArr); - while ((pIter = taosHashIterate(RSMA_FETCH_HASH(pRSmaStat), pIter))) { - tdRSmaConsumeAndFetch(pSma, *(int64_t *)taosHashGetKey(pIter, NULL), *(int8_t *)pIter, pSubmitArr); - } - } + } // end of while(true) _end: taosArrayDestroy(pSubmitArr); - taosArrayDestroy(pSubmitQArr); return TSDB_CODE_SUCCESS; _err: taosArrayDestroy(pSubmitArr); - taosArrayDestroy(pSubmitQArr); - return TSDB_CODE_FAILED; -} - -/** - * @brief exec rsma level 1data, fetch result of level 2/3 and submit - * - * @param pSma - * @param pMsg - * @return int32_t - */ -int32_t smaProcessExec(SSma *pSma, void *pMsg) { - SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg; - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); - - if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) { - terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP; - goto _err; - } - smaDebug("vgId:%d, begin to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_OVERFLOW) < 0) { - goto _err; - } - - atomic_store_8(&pRSmaStat->execStat, 0); - smaDebug("vgId:%d, success to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); - return TSDB_CODE_SUCCESS; -_err: - atomic_store_8(&pRSmaStat->execStat, 0); - smaError("vgId:%d, failed to process rsma exec msg by TID:%p since %s", SMA_VID(pSma), (void *)taosGetSelfPthreadId(), - terrstr()); return TSDB_CODE_FAILED; } diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c index da70222485..d771797963 100644 --- a/source/dnode/vnode/src/sma/smaUtil.c +++ b/source/dnode/vnode/src/sma/smaUtil.c @@ -375,6 +375,9 @@ int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) { if (TABLE_IS_ROLLUP(mr.me.flags)) { param = &mr.me.stbEntry.rsmaParam; for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { + if (!pInfo->iTaskInfo[i]) { + continue; + } if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) { goto _err; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c6bc8e6e59..3ff59ac2c0 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -79,6 +79,10 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { ASSERT(0); } + if (streamLoadTasks(pTq->pStreamMeta) < 0) { + ASSERT(0); + } + return pTq; } @@ -648,17 +652,28 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { // expand executor if (pTask->taskLevel == TASK_LEVEL__SOURCE) { + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } + SReadHandle handle = { .meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTqReader = 1, + .pStateBackend = 
pTask->pState, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); ASSERT(pTask->exec.executor); } else if (pTask->taskLevel == TASK_LEVEL__AGG) { + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo), + .pStateBackend = pTask->pState, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle); ASSERT(pTask->exec.executor); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index ed25783e9f..b9f3897674 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -422,6 +422,8 @@ typedef struct { STsdb *pTsdb; // [input] SBlockIdx *pBlockIdxExp; // [input] STSchema *pTSchema; // [input] + tb_uid_t suid; + tb_uid_t uid; int32_t nFileSet; int32_t iFileSet; SArray *aDFileSet; @@ -494,6 +496,8 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) { if (!state->pBlockDataL) { state->pBlockDataL = &state->blockDataL; + + tBlockDataCreate(state->pBlockDataL); } code = tBlockDataInit(state->pBlockDataL, suid, suid ? 0 : uid, state->pTSchema); if (code) goto _err; @@ -593,6 +597,9 @@ typedef struct SFSNextRowIter { SFSNEXTROWSTATES state; // [input] STsdb *pTsdb; // [input] SBlockIdx *pBlockIdxExp; // [input] + STSchema *pTSchema; // [input] + tb_uid_t suid; + tb_uid_t uid; int32_t nFileSet; int32_t iFileSet; SArray *aDFileSet; @@ -685,6 +692,10 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetBlock); /* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */ + tBlockDataReset(state->pBlockData); + code = tBlockDataInit(state->pBlockData, state->suid, state->uid, state->pTSchema); + if (code) goto _err; + code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData); if (code) goto _err; @@ -958,16 +969,21 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs pIter->idx = (SBlockIdx){.suid = suid, .uid = uid}; - pIter->fsLastState.state = (SFSLASTNEXTROWSTATES) SFSNEXTROW_FS; + pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS; pIter->fsLastState.pTsdb = pTsdb; pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet; pIter->fsLastState.pBlockIdxExp = &pIter->idx; pIter->fsLastState.pTSchema = pTSchema; + pIter->fsLastState.suid = suid; + pIter->fsLastState.uid = uid; pIter->fsState.state = SFSNEXTROW_FS; pIter->fsState.pTsdb = pTsdb; pIter->fsState.aDFileSet = pIter->pReadSnap->fs.aDFileSet; pIter->fsState.pBlockIdxExp = &pIter->idx; + pIter->fsState.pTSchema = pTSchema; + pIter->fsState.suid = suid; + pIter->fsState.uid = uid; pIter->input[0] = (TsdbNextRowState){&pIter->memRow, true, false, &pIter->memState, getNextRowFromMem, NULL}; pIter->input[1] = (TsdbNextRowState){&pIter->imemRow, true, false, &pIter->imemState, getNextRowFromMem, NULL}; diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 64f223b974..8c73499229 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -220,13 +220,6 @@ int vnodeCommit(SVnode *pVnode) { vInfo("vgId:%d, start to commit, commit ID:%" PRId64 " version:%" PRId64, TD_VID(pVnode), pVnode->state.commitID, pVnode->state.applied); - // preCommit - // 
smaSyncPreCommit(pVnode->pSma); - smaAsyncPreCommit(pVnode->pSma); - - vnodeBufPoolUnRef(pVnode->inUse); - pVnode->inUse = NULL; - pVnode->state.commitTerm = pVnode->state.applyTerm; // save info @@ -241,6 +234,16 @@ int vnodeCommit(SVnode *pVnode) { } walBeginSnapshot(pVnode->pWal, pVnode->state.applied); + // preCommit + // smaSyncPreCommit(pVnode->pSma); + if(smaAsyncPreCommit(pVnode->pSma) < 0){ + ASSERT(0); + return -1; + } + + vnodeBufPoolUnRef(pVnode->inUse); + pVnode->inUse = NULL; + // commit each sub-system if (metaCommit(pVnode->pMeta) < 0) { ASSERT(0); @@ -248,7 +251,10 @@ int vnodeCommit(SVnode *pVnode) { } if (VND_IS_RSMA(pVnode)) { - smaAsyncCommit(pVnode->pSma); + if (smaAsyncCommit(pVnode->pSma) < 0) { + ASSERT(0); + return -1; + } if (tsdbCommit(VND_RSMA0(pVnode)) < 0) { ASSERT(0); @@ -285,7 +291,10 @@ int vnodeCommit(SVnode *pVnode) { // postCommit // smaSyncPostCommit(pVnode->pSma); - smaAsyncPostCommit(pVnode->pSma); + if (smaAsyncPostCommit(pVnode->pSma) < 0) { + ASSERT(0); + return -1; + } // apply the commit (TODO) walEndSnapshot(pVnode->pWal); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c73d2ccfd5..495220b5de 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -301,10 +301,6 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_SCH_QUERY_CONTINUE: return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); - case TDMT_VND_FETCH_RSMA: - return smaProcessFetch(pVnode->pSma, pMsg); - case TDMT_VND_EXEC_RSMA: - return smaProcessExec(pVnode->pSma, pMsg); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -382,14 +378,14 @@ static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t code = 0; SVTrimDbReq trimReq = {0}; - vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp); - // decode if (tDeserializeSVTrimDbReq(pReq, len, &trimReq) != 0) { code = TSDB_CODE_INVALID_MSG; goto _exit; } + vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp); + // process code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp); if (code) goto _exit; diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 933e65e582..b6e958e192 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -893,7 +893,7 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray* CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - SName name; + SName name = {0}; int32_t sver = 0; int32_t tver = 0; int32_t tbNum = taosArrayGetSize(pTables); diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 53d118e1ad..706985f894 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -100,7 +100,6 @@ extern "C" { typedef struct SExplainGroup { int32_t nodeNum; int32_t physiPlanExecNum; - int32_t physiPlanNum; int32_t physiPlanExecIdx; SRWLatch lock; SSubplan *plan; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 9da9168555..967c682b0b 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -296,8 +296,6 @@ int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplai 
QRY_ERR_JRET(qExplainGenerateResChildren(pNode, group, &resNode->pChildren)); - ++group->physiPlanNum; - *pResNode = resNode; return TSDB_CODE_SUCCESS; @@ -1548,12 +1546,6 @@ int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level) { QRY_ERR_RET(qExplainGenerateResNode(group->plan->pNode, group, &node)); - if ((EXPLAIN_MODE_ANALYZE == ctx->mode) && (group->physiPlanNum != group->physiPlanExecNum)) { - qError("physiPlanNum %d mismatch with physiExecNum %d in group %d", group->physiPlanNum, group->physiPlanExecNum, - groupId); - QRY_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - QRY_ERR_JRET(qExplainResNodeToRows(node, ctx, level)); _return: @@ -1578,12 +1570,9 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { SColumnInfoData *pInfoData = taosArrayGet(pBlock->pDataBlock, 0); - char buf[1024] = {0}; for (int32_t i = 0; i < rowNum; ++i) { SQueryExplainRowInfo *row = taosArrayGet(pCtx->rows, i); - varDataCopy(buf, row->buf); - ASSERT(varDataTLen(row->buf) == row->len); - colDataAppend(pInfoData, i, buf, false); + colDataAppend(pInfoData, i, row->buf, false); } pBlock->info.rows = rowNum; diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index e287bcc882..a25933d15e 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -119,6 +119,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode); EDealRes doTranslateTagExpr(SNode** pNode, void* pContext); int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo); int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId); +int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo); size_t getTableTagsBufLen(const SNodeList* pGroups); SArray* createSortInfo(SNodeList* pNodeList); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 73f7781c04..a34ba804ed 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -150,6 +150,7 @@ typedef struct { SQueryTableDataCond tableCond; int64_t recoverStartVer; int64_t recoverEndVer; + SStreamState* pState; } SStreamTaskInfo; typedef struct { @@ -1016,7 +1017,7 @@ bool functionNeedToExecute(SqlFunctionCtx* pCtx); bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup); bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup); bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup); -void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid); +void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID); void printDataBlock(SSDataBlock* pBlock, const char* flag); int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 734b63b94d..b89579a017 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -284,17 +284,17 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, return TSDB_CODE_SUCCESS; } -typedef struct tagFilterAssist{ - SHashObj *colHash; +typedef struct tagFilterAssist { + SHashObj* colHash; int32_t index; - SArray *cInfoList; -}tagFilterAssist; + SArray* cInfoList; +} tagFilterAssist; static EDealRes getColumn(SNode** 
pNode, void* pContext) { SColumnNode* pSColumnNode = NULL; if (QUERY_NODE_COLUMN == nodeType((*pNode))) { pSColumnNode = *(SColumnNode**)pNode; - }else if(QUERY_NODE_FUNCTION == nodeType((*pNode))){ + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); @@ -307,24 +307,26 @@ static EDealRes getColumn(SNode** pNode, void* pContext) { pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; nodesDestroyNode(*pNode); *pNode = (SNode*)pSColumnNode; - }else{ + } else { return DEAL_RES_CONTINUE; } - }else{ + } else { return DEAL_RES_CONTINUE; } - tagFilterAssist *pData = (tagFilterAssist *)pContext; - void *data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); - if(!data){ + tagFilterAssist* pData = (tagFilterAssist*)pContext; + void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + if (!data) { taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); pSColumnNode->slotId = pData->index++; - SColumnInfo cInfo = {.colId = pSColumnNode->colId, .type = pSColumnNode->node.resType.type, .bytes = pSColumnNode->node.resType.bytes}; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; #if TAG_FILTER_DEBUG qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type); #endif taosArrayPush(pData->cInfoList, &cInfo); - }else{ + } else { SColumnNode* col = *(SColumnNode**)data; pSColumnNode->slotId = col->slotId; } @@ -339,9 +341,9 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return terrno; } - pColumnData->info.type = pType->type; - pColumnData->info.bytes = pType->bytes; - pColumnData->info.scale = pType->scale; + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; pColumnData->info.precision = pType->precision; int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows); @@ -356,27 +358,27 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return TSDB_CODE_SUCCESS; } -static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond){ - int32_t code = TSDB_CODE_SUCCESS; - SArray* pBlockList = NULL; +static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond) { + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; SSDataBlock* pResBlock = NULL; - SHashObj * tags = NULL; + SHashObj* tags = NULL; SScalarParam output = {0}; tagFilterAssist ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - if(ctx.colHash == NULL){ + if (ctx.colHash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto end; } ctx.index = 0; ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - if(ctx.cInfoList == NULL){ + if (ctx.cInfoList == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto end; } - nodesRewriteExprPostOrder(&pTagCond, getColumn, (void *)&ctx); + nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); pResBlock = createDataBlock(); if (pResBlock == NULL) { @@ -390,20 +392,21 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray 
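For context on the tag-condition path being reworked here: getColInfoResult() gathers the tag columns referenced by pTagCond into a temporary columnar block, fills one row per candidate table, and (further below) evaluates the condition with scalarCalculate() to produce a boolean column that the caller uses to drop non-matching uids. A rough standalone sketch of that evaluate-then-compact idea follows; TagBlock, evalPred and filterByTag are invented for illustration and stand in for the SSDataBlock/scalar machinery, they are not real TDengine APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a toy columnar "tag block": one int tag column, one row per candidate table */
typedef struct { int64_t *uids; int *tagVal; int rows; } TagBlock;

/* stand-in for the compiled tag condition; here: tag > threshold */
static bool evalPred(const TagBlock *b, int row, int threshold) { return b->tagVal[row] > threshold; }

/* keep only uids whose predicate evaluates to true; returns the new count */
static int filterByTag(TagBlock *b, int threshold) {
  int n = 0;
  for (int i = 0; i < b->rows; ++i) {
    bool keep = evalPred(b, i, threshold);  /* analogous to one row of the boolean output column */
    if (keep) b->uids[n++] = b->uids[i];    /* analogous to compacting the qualified uid list    */
  }
  return n;
}

int main(void) {
  int64_t uids[] = {1001, 1002, 1003, 1004};
  int     tags[] = {5, 20, 7, 42};
  TagBlock b = {uids, tags, 4};
  int n = filterByTag(&b, 10);
  for (int i = 0; i < n; ++i) printf("keep uid %lld\n", (long long)b.uids[i]);
  return 0;
}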
blockDataAppendColInfo(pResBlock, &colInfo); } -// int64_t stt = taosGetTimestampUs(); + // int64_t stt = taosGetTimestampUs(); tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); code = metaGetTableTags(metaHandle, suid, uidList, tags); if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); terrno = code; goto end; } int32_t rows = taosArrayGetSize(uidList); - if(rows == 0){ + if (rows == 0) { goto end; } -// int64_t stt1 = taosGetTimestampUs(); -// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); code = blockDataEnsureCapacity(pResBlock, rows); if (code != TSDB_CODE_SUCCESS) { @@ -411,48 +414,46 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray goto end; } -// int64_t st = taosGetTimestampUs(); + // int64_t st = taosGetTimestampUs(); for (int32_t i = 0; i < rows; i++) { int64_t* uid = taosArrayGet(uidList, i); - void* tag = taosHashGet(tags, uid, sizeof(int64_t)); - if (suid != 0) { - ASSERT(tag); - } - for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){ + for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - if(pColInfo->info.colId == -1){ // tbname + if (pColInfo->info.colId == -1) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; metaGetTableNameByUid(metaHandle, *uid, str); colDataAppend(pColInfo, i, str, false); #if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2); + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); #endif - }else{ + } else { + void* tag = taosHashGet(tags, uid, sizeof(int64_t)); + ASSERT(tag); STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){ + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { colDataAppend(pColInfo, i, p, true); } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { colDataAppend(pColInfo, i, p, false); } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); varDataSetLen(tmp, tagVal.nData); memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); colDataAppend(pColInfo, i, tmp, false); #if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp+2); + qDebug("tagfilter varch:%s", tmp + 2); #endif taosMemoryFree(tmp); } else { colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); #if TAG_FILTER_DEBUG - if(pColInfo->info.type == TSDB_DATA_TYPE_INT){ + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){ - qDebug("tagfilter double:%f", *(double *)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); } #endif } @@ -461,8 +462,8 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray } pResBlock->info.rows = rows; -// int64_t st1 = taosGetTimestampUs(); -// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + // int64_t st1 = 
taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); pBlockList = taosArrayInit(2, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); @@ -470,15 +471,19 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; code = createResultData(&type, rows, &output); if (code != TSDB_CODE_SUCCESS) { + terrno = code; + qError("failed to create result, reason:%s", tstrerror(code)); goto end; } code = scalarCalculate(pTagCond, pBlockList, &output); - if(code != TSDB_CODE_SUCCESS){ + if (code != TSDB_CODE_SUCCESS) { + qError("failed to calculate scalar, reason:%s", tstrerror(code)); terrno = code; + goto end; } -// int64_t st2 = taosGetTimestampUs(); -// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + // int64_t st2 = taosGetTimestampUs(); + // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); end: taosHashCleanup(tags); @@ -489,6 +494,241 @@ end: return output.columnData; } +static void releaseColInfoData(void* pCol) { + if (pCol) { + SColumnInfoData* col = (SColumnInfoData*)pCol; + colDataDestroy(col); + taosMemoryFree(col); + } +} + +int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) { + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; + SSDataBlock* pResBlock = NULL; + SHashObj* tags = NULL; + SArray* uidList = NULL; + void* keyBuf = NULL; + SArray* groupData = NULL; + + int32_t rows = taosArrayGetSize(pTableListInfo->pTableList); + if (rows == 0) { + return TDB_CODE_SUCCESS; + } + + tagFilterAssist ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + if (ctx.colHash == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + ctx.index = 0; + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (ctx.cInfoList == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + SNode* pNode = NULL; + FOREACH(pNode, group) { + nodesRewriteExprPostOrder(&pNode, getColumn, (void*)&ctx); + REPLACE_NODE(pNode); + } + + pResBlock = createDataBlock(); + if (pResBlock == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { + SColumnInfoData colInfo = {{0}, 0}; + colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); + blockDataAppendColInfo(pResBlock, &colInfo); + } + + uidList = taosArrayInit(rows, sizeof(uint64_t)); + for (int32_t i = 0; i < rows; ++i) { + STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i); + taosArrayPush(uidList, &pkeyInfo->uid); + } + + // int64_t stt = taosGetTimestampUs(); + tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); + + code = blockDataEnsureCapacity(pResBlock, rows); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + // int64_t st = taosGetTimestampUs(); + for (int32_t i = 0; i < rows; i++) { + int64_t* uid = taosArrayGet(uidList, i); + for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); + + if (pColInfo->info.colId == -1) { // tbname + char str[TSDB_TABLE_FNAME_LEN + 
VARSTR_HEADER_SIZE] = {0}; + metaGetTableNameByUid(metaHandle, *uid, str); + colDataAppend(pColInfo, i, str, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); +#endif + } else { + void* tag = taosHashGet(tags, uid, sizeof(int64_t)); + ASSERT(tag); + STagVal tagVal = {0}; + tagVal.cid = pColInfo->info.colId; + const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataAppend(pColInfo, i, p, true); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataAppend(pColInfo, i, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataAppend(pColInfo, i, tmp, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter varch:%s", tmp + 2); +#endif + taosMemoryFree(tmp); + } else { + colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); +#if TAG_FILTER_DEBUG + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { + qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); + } +#endif + } + } + } + } + pResBlock->info.rows = rows; + + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + + pBlockList = taosArrayInit(2, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + + groupData = taosArrayInit(2, POINTER_BYTES); + FOREACH(pNode, group) { + SScalarParam output = {0}; + + switch (nodeType(pNode)) { + case QUERY_NODE_VALUE: + break; + case QUERY_NODE_COLUMN: + case QUERY_NODE_OPERATOR: + case QUERY_NODE_FUNCTION: { + SExprNode* expNode = (SExprNode*)pNode; + code = createResultData(&expNode->resType, rows, &output); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + break; + } + default: + code = TSDB_CODE_OPS_NOT_SUPPORT; + goto end; + } + if (nodeType(pNode) == QUERY_NODE_COLUMN) { + SColumnNode* pSColumnNode = (SColumnNode*)pNode; + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, pSColumnNode->slotId); + code = colDataAssign(output.columnData, pColInfo, rows, NULL); + } else if (nodeType(pNode) == QUERY_NODE_VALUE) { + continue; + } else { + code = scalarCalculate(pNode, pBlockList, &output); + } + if (code != TSDB_CODE_SUCCESS) { + releaseColInfoData(output.columnData); + goto end; + } + taosArrayPush(groupData, &output.columnData); + } + + int32_t keyLen = 0; + SNode* node; + FOREACH(node, group) { + SExprNode* pExpr = (SExprNode*)node; + keyLen += pExpr->resType.bytes; + } + + int32_t nullFlagSize = sizeof(int8_t) * LIST_LENGTH(group); + keyLen += nullFlagSize; + + keyBuf = taosMemoryCalloc(1, keyLen); + if (keyBuf == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + for (int i = 0; i < rows; i++) { + STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); + + char* isNull = (char*)keyBuf; + char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(group); + for (int j = 0; j < taosArrayGetSize(groupData); j++) { + SColumnInfoData* pValue = (SColumnInfoData*)taosArrayGetP(groupData, j); + + if (colDataIsNull_s(pValue, i)) { + isNull[j] = 1; + } else { + isNull[j] = 0; + char* data = colDataGetData(pValue, i); + if (pValue->info.type == TSDB_DATA_TYPE_JSON) { + if (tTagIsJson(data)) { + code = 
TSDB_CODE_QRY_JSON_IN_GROUP_ERROR; + goto end; + } + if (tTagIsJsonNull(data)) { + isNull[j] = 1; + continue; + } + int32_t len = getJsonValueLen(data); + memcpy(pStart, data, len); + pStart += len; + } else if (IS_VAR_DATA_TYPE(pValue->info.type)) { + memcpy(pStart, data, varDataTLen(data)); + pStart += varDataTLen(data); + } else { + memcpy(pStart, data, pValue->info.bytes); + pStart += pValue->info.bytes; + } + } + } + + int32_t len = (int32_t)(pStart - (char*)keyBuf); + info->groupId = calcGroupId(keyBuf, len); + taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); + } + + // int64_t st2 = taosGetTimestampUs(); + // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + +end: + taosMemoryFreeClear(keyBuf); + taosHashCleanup(tags); + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + taosArrayDestroy(uidList); + taosArrayDestroyP(groupData, releaseColInfoData); + return code; +} + int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo) { int32_t code = TSDB_CODE_SUCCESS; @@ -507,7 +747,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SIndexMetaArg metaArg = { .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; -// int64_t stt = taosGetTimestampUs(); + // int64_t stt = taosGetTimestampUs(); SIdxFltStatus status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, res, &status); if (code != 0 || status == SFLT_NOT_INDEX) { @@ -515,23 +755,25 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, code = TDB_CODE_SUCCESS; } -// int64_t stt1 = taosGetTimestampUs(); -// qDebug("generate table list, cost:%ld us", stt1-stt); - }else if(!pTagCond){ + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate table list, cost:%ld us", stt1-stt); + } else if (!pTagCond) { vnodeGetCtbIdList(pVnode, pScanNode->suid, res); } } else { // Create one table group. 
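The group-id computation in getColInfoResultForGroupby() above serializes, for each table, one null flag per group-by expression followed by the non-null values into keyBuf and hashes that buffer with calcGroupId(). A small standalone sketch of the same key layout for two fixed-size columns follows; the FNV-1a hash is only a placeholder (calcGroupId's actual implementation is not shown in this patch), the sketch skips the variable-length and JSON cases handled above, and all names in it are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* FNV-1a stands in for calcGroupId(); any stable 64-bit hash would do for the sketch */
static uint64_t hashKey(const char *buf, int len) {
  uint64_t h = 1469598103934665603ULL;
  for (int i = 0; i < len; ++i) { h ^= (unsigned char)buf[i]; h *= 1099511628211ULL; }
  return h;
}

/* build <null flags><value bytes> for two group-by columns of one row, then hash it */
static uint64_t rowGroupId(bool null1, int32_t v1, bool null2, int64_t v2) {
  char  keyBuf[2 + sizeof(int32_t) + sizeof(int64_t)];
  char *p = keyBuf + 2;                        /* first 2 bytes: one null flag per column      */
  keyBuf[0] = null1; keyBuf[1] = null2;
  if (!null1) { memcpy(p, &v1, sizeof(v1)); p += sizeof(v1); }  /* null values contribute no bytes */
  if (!null2) { memcpy(p, &v2, sizeof(v2)); p += sizeof(v2); }
  return hashKey(keyBuf, (int)(p - keyBuf));
}

int main(void) {
  printf("groupId=%llu\n", (unsigned long long)rowGroupId(false, 7, false, 42));
  printf("groupId(null tag)=%llu\n", (unsigned long long)rowGroupId(true, 0, false, 42));
  return 0;
}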
- if(metaIsTableExist(metaHandle, tableUid)){ + if (metaIsTableExist(metaHandle, tableUid)) { taosArrayPush(res, &tableUid); } } if (pTagCond) { + terrno = TDB_CODE_SUCCESS; SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond); - if(terrno != TDB_CODE_SUCCESS){ + if (terrno != TDB_CODE_SUCCESS) { colDataDestroy(pColInfoData); taosMemoryFreeClear(pColInfoData); taosArrayDestroy(res); + qError("failed to getColInfoResult, code: %s", tstrerror(terrno)); return terrno; } @@ -589,7 +831,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) { int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) { SMetaReader mr = {0}; metaReaderInit(&mr, pMeta, 0); - if(metaGetTableEntryByUid(&mr, uid) != 0){ // table not exist + if (metaGetTableEntryByUid(&mr, uid) != 0) { // table not exist metaReaderClear(&mr); return TSDB_CODE_PAR_TABLE_NOT_EXIST; } @@ -747,7 +989,7 @@ static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, i return s; } -static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) { +static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType, EColumnType colType) { SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn)); if (pCol == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -761,7 +1003,7 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa pCol->scale = pType->scale; pCol->precision = pType->precision; pCol->dataBlockId = blockId; - + pCol->colType = colType; return pCol; } @@ -805,7 +1047,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* SDataType* pType = &pColNode->node.resType; pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, pType->precision, pColNode->colName); - pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType); + pExp->base.pParam[0].pCol = + createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType); pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN; } else if (type == QUERY_NODE_VALUE) { pExp->pExpr->nodeType = QUERY_NODE_VALUE; @@ -857,7 +1100,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* SColumnNode* pcn = (SColumnNode*)p1; pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN; - pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType); + pExp->base.pParam[j].pCol = + createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType); } else if (p1->type == QUERY_NODE_VALUE) { SValueNode* pvn = (SValueNode*)p1; pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE; @@ -973,6 +1217,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, pCtx->end.key = INT64_MIN; pCtx->numOfParams = pExpr->base.numOfParams; pCtx->increase = false; + pCtx->isStream = false; pCtx->param = pFunct->pParam; } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index d8f63cb008..fe1f4911ca 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -352,12 +352,14 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model); if (code != TSDB_CODE_SUCCESS) { + qError("failed to 
createExecTaskInfoImpl, code: %s", tstrerror(code)); goto _error; } SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000}; code = dsDataSinkMgtInit(&cfg); if (code != TSDB_CODE_SUCCESS) { + qError("failed to dsDataSinkMgtInit, code: %s", tstrerror(code)); goto _error; } @@ -365,6 +367,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, void* pSinkParam = NULL; code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo, readHandle); if (code != TSDB_CODE_SUCCESS) { + qError("failed to createDataSinkParam, code: %s", tstrerror(code)); goto _error; } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 0b2b7d0220..893acf1bbc 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -140,20 +140,6 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size); static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId); -// setup the output buffer for each operator -static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) { - if (TSDB_COL_IS_TAG(pColumn->flag) || TSDB_COL_IS_UD_COL(pColumn->flag) || - pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { - return false; - } - - if (pStatis != NULL && pStatis->numOfNull == 0) { - return false; - } - - return true; -} - #if 0 static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData, int16_t bytes, bool masterscan, uint64_t uid) { @@ -381,7 +367,7 @@ static void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { static void functionCtxRestore(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { pCtx->input.colDataAggIsSet = pStatus->hasAgg; - pCtx->input.numOfRows = pStatus->numOfRows; + pCtx->input.numOfRows = pStatus->numOfRows; pCtx->input.startRowIndex = pStatus->startOffset; } @@ -3139,6 +3125,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { initResultRow(resultRow); pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset}; + // releaseBufPage(pSup->pResultBuf, getBufPage(pSup->pResultBuf, pageId)); } if (offset != length) { @@ -3330,7 +3317,11 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { pInfo->curGroupId = pInfo->pRes->info.groupId; // the first data block pInfo->totalInputRows += pInfo->pRes->info.rows; - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.ekey); + if (order == pInfo->pFillInfo->order) { + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.ekey); + } else { + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.skey); + } taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->pRes); } else if (pInfo->curGroupId != pBlock->info.groupId) { // the new group data block pInfo->existNewGroupBlock = pBlock; @@ -3699,13 +3690,20 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t const char* id, SInterval* pInterval, int32_t fillType, int32_t order) { SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode); - STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey); - w = getFirstQualifiedTimeWindow(win.skey, &w, pInterval, TSDB_ORDER_ASC); + int64_t startKey = (order == TSDB_ORDER_ASC) ? 
win.skey : win.ekey; + STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, startKey); + w = getFirstQualifiedTimeWindow(startKey, &w, pInterval, order); pInfo->pFillInfo = taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, pInfo->primaryTsCol, order, id); - pInfo->win = win; + if (order == TSDB_ORDER_ASC) { + pInfo->win.skey = win.skey; + pInfo->win.ekey = win.ekey; + } else { + pInfo->win.skey = win.ekey; + pInfo->win.ekey = win.skey; + } pInfo->p = taosMemoryCalloc(numOfCols, POINTER_BYTES); if (pInfo->pFillInfo == NULL || pInfo->p == NULL) { @@ -3882,9 +3880,9 @@ static void cleanupTableSchemaInfo(SSchemaInfo* pSchemaInfo) { tDeleteSSchemaWrapper(pSchemaInfo->qsw); } -static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum) { +static int32_t sortTableGroup(STableListInfo* pTableListInfo) { taosArrayClear(pTableListInfo->pGroupList); - SArray* sortSupport = taosArrayInit(groupNum, sizeof(uint64_t)); + SArray* sortSupport = taosArrayInit(16, sizeof(uint64_t)); if (sortSupport == NULL) return TSDB_CODE_OUT_OF_MEMORY; for (int32_t i = 0; i < taosArrayGetSize(pTableListInfo->pTableList); i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); @@ -3962,48 +3960,26 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, if (pTableListInfo->map == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } - int32_t keyLen = 0; - void* keyBuf = NULL; - - SNode* node; - FOREACH(node, group) { - SExprNode* pExpr = (SExprNode*)node; - keyLen += pExpr->resType.bytes; - } - - int32_t nullFlagSize = sizeof(int8_t) * LIST_LENGTH(group); - keyLen += nullFlagSize; - - keyBuf = taosMemoryCalloc(1, keyLen); - if (keyBuf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } bool assignUid = groupbyTbname(group); - int32_t groupNum = 0; - size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); + size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); - for (int32_t i = 0; i < numOfTables; i++) { - STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); - - if (assignUid) { + if (assignUid) { + for (int32_t i = 0; i < numOfTables; i++) { + STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); info->groupId = info->uid; - } else { - int32_t code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId); - if (code != TSDB_CODE_SUCCESS) { - return code; - } + taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); + } + } else { + int32_t code = getColInfoResultForGroupby(pHandle->meta, group, pTableListInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; } - - taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); - groupNum++; } - taosMemoryFree(keyBuf); - if (pTableListInfo->needSortTableByGroupId) { - return sortTableGroup(pTableListInfo, groupNum); + return sortTableGroup(pTableListInfo); } return TDB_CODE_SUCCESS; @@ -4048,6 +4024,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo)); if (code) { pTaskInfo->code = code; + qError("failed to createScanTableListInfo, code: %s", tstrerror(code)); return NULL; } @@ -4067,6 +4044,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo)); if (code) { pTaskInfo->code = code; + qError("failed to 
createScanTableListInfo, code: %s", tstrerror(code)); return NULL; } @@ -4090,6 +4068,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pHandle, pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo)); if (code) { pTaskInfo->code = code; + qError("failed to createScanTableListInfo, code: %s", tstrerror(code)); return NULL; } @@ -4112,6 +4091,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = terrno; + qError("failed to getTableList, code: %s", tstrerror(code)); return NULL; } @@ -4610,6 +4590,10 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } + if (pHandle && pHandle->pStateBackend) { + (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend; + } + (*pTaskInfo)->sql = sql; sql = NULL; (*pTaskInfo)->pSubplan = pPlan; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index c9b8d5a377..db5405d86c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1086,7 +1086,10 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX); + SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX); SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); + SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); + SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); int32_t dummy = 0; for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[i]); @@ -1100,9 +1103,13 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr SResultWindowInfo* pEndWin = getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy); ASSERT(pEndWin); + TSKEY ts = INT64_MIN; colDataAppend(pDestStartCol, i, (const char*)&pStartWin->win.skey, false); colDataAppend(pDestEndCol, i, (const char*)&pEndWin->win.ekey, false); + colDataAppendNULL(pDestUidCol, i); colDataAppend(pDestGpCol, i, (const char*)&groupId, false); + colDataAppendNULL(pDestCalStartTsCol, i); + colDataAppendNULL(pDestCalEndTsCol, i); pDestBlock->info.rows++; } return TSDB_CODE_SUCCESS; @@ -1157,13 +1164,13 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, return code; } -void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid) { +void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID) { SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX); - SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX); + SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, uidCol); colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)pStartTs, false); colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false); - colDataAppend(pUidCol, 
pBlock->info.rows, (const char*)pUid, false); + colDataAppend(pUidCol, pBlock->info.rows, (const char*)pID, false); pBlock->info.rows++; } @@ -1190,7 +1197,7 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock bool closedWin = isClosed && isSignleIntervalWindow(pInfo) && isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup); if ((update || closedWin) && out) { - appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid); + appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, UID_COLUMN_INDEX, &pBlock->info.uid); } } if (out) { @@ -1274,6 +1281,42 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; +#if 0 + SStreamState* pState = pTaskInfo->streamInfo.pState; + if (pState) { + printf(">>>>>>>> stream write backend\n"); + SWinKey key = { + .ts = 1, + .groupId = 2, + }; + char tmp[100] = "abcdefg1"; + if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) { + ASSERT(0); + } + + key.ts = 2; + char tmp2[100] = "abcdefg2"; + if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) { + ASSERT(0); + } + + key.groupId = 5; + key.ts = 1; + char tmp3[100] = "abcdefg3"; + if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) { + ASSERT(0); + } + + char* val2 = NULL; + int32_t sz; + if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) { + ASSERT(0); + } + printf("stream read %s %d\n", val2, sz); + streamFreeVal(val2); + } +#endif + qDebug("stream scan called"); if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { @@ -2640,6 +2683,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { + qError("failed to getTableList, code: %s", tstrerror(code)); return code; } diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 6d7cd727b9..f23552c5a7 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -36,6 +36,7 @@ #define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId) static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); +static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex); static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) { for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { @@ -43,9 +44,8 @@ static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowInd int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId); if (pCol->notFillCol) { - if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfo, rowIndex); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfo, rowIndex, pKey); @@ -76,6 +76,35 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32 } } +//fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false +static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) { + if (!pCol->notFillCol) { + return false; + } + if (pCol->pExpr->pExpr->nodeType == QUERY_NODE_COLUMN) { + if (pCol->pExpr->base.numOfParams != 1) { + return false; + } + if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) { + colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->currentKey, false); + return true; + } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_END) { + //TODO: include endpoint + SInterval* pInterval = &pFillInfo->interval; + int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 1 : -1; + int64_t windowEnd = + taosTimeAdd(pFillInfo->currentKey, pInterval->sliding * step, pInterval->slidingUnit, pInterval->precision); + colDataAppend(pDstColInfoData, rowIndex, (const char*)&windowEnd, false); + return true; + } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_DURATION) { + //TODO: include endpoint + colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->interval.sliding, false); + return true; + } + } + return false; +} + static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* pSrcBlock, int64_t ts, bool outOfBound) { SPoint point1, point2, point; @@ -92,10 +121,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* SFillColInfo* pCol = &pFillInfo->pFillCol[i]; SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); - - if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index); + if (!filled) { SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfoData, index, pKey); } @@ -106,10 +133,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); - - if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index); + if (!filled) { SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfoData, index, pKey); } @@ -127,9 +152,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* int16_t type = pDstCol->info.type; if (pCol->notFillCol) { - if (type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstCol, index); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstCol, index, pKey); @@ -170,9 +194,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId); if (pCol->notFillCol) { - if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDst, index); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDst, index, pKey); @@ -540,7 +563,7 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma int64_t numOfRes = -1; if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set. - TSKEY lastKey = (TSDB_ORDER_ASC == pFillInfo->order ? tsList[pFillInfo->numOfRows - 1] : tsList[0]); + TSKEY lastKey = tsList[pFillInfo->numOfRows - 1]; numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, pFillInfo->interval.precision); numOfRes += 1; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 1ef191679e..d56ede49f7 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -15,6 +15,7 @@ #include "executorimpl.h" #include "function.h" #include "functionMgt.h" +#include "tcommon.h" #include "tcompare.h" #include "tdatablock.h" #include "tfill.h" @@ -27,11 +28,6 @@ typedef enum SResultTsInterpType { #define IS_FINAL_OP(op) ((op)->isFinal) -typedef struct SWinRes { - TSKEY ts; - uint64_t groupId; -} SWinRes; - typedef struct SPullWindowInfo { STimeWindow window; uint64_t groupId; @@ -641,7 +637,8 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, numOfExprs); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, + numOfExprs); if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { closeResultRow(pr); @@ -812,7 +809,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) { int32_t compareResKey(void* pKey, void* data, int32_t index) { SArray* res = (SArray*)data; SResKeyPos* pos = taosArrayGetP(res, index); - SWinRes* pData = (SWinRes*)pKey; + SWinKey* pData = (SWinKey*)pKey; if (pData->ts == *(int64_t*)pos->key) { if (pData->groupId > pos->groupId) { return 1; @@ -828,7 +825,7 @@ int32_t compareResKey(void* pKey, void* data, int32_t index) { static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SArray* pUpdated) { int32_t size = taosArrayGetSize(pUpdated); - SWinRes data = {.ts = ts, .groupId = groupId}; + SWinKey data = {.ts = ts, .groupId = groupId}; int32_t index = binarySearchCom(pUpdated, size, &data, TSDB_ORDER_DESC, compareResKey); if (index == -1) { index = 0; @@ -861,8 +858,8 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_ newPos->groupId = groupId; newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; *(int64_t*)newPos->key = ts; - 
SWinRes key = {.ts = ts, .groupId = groupId}; - if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { + SWinKey key = {.ts = ts, .groupId = groupId}; + if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { taosMemoryFree(newPos); } return TSDB_CODE_SUCCESS; @@ -879,20 +876,20 @@ static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpda static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) { int32_t size = taosArrayGetSize(pWins); for (int32_t i = 0; i < size; i++) { - SWinRes* pW = taosArrayGet(pWins, i); - taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes)); + SWinKey* pW = taosArrayGet(pWins, i); + taosHashRemove(pUpdatedMap, pW, sizeof(SWinKey)); } } int64_t getWinReskey(void* data, int32_t index) { SArray* res = (SArray*)data; - SWinRes* pos = taosArrayGet(res, index); + SWinKey* pos = taosArrayGet(res, index); return pos->ts; } int32_t compareWinRes(void* pKey, void* data, int32_t index) { SArray* res = (SArray*)data; - SWinRes* pos = taosArrayGetP(res, index); + SWinKey* pos = taosArrayGetP(res, index); SResKeyPos* pData = (SResKeyPos*)pKey; if (*(int64_t*)pData->key == pos->ts) { if (pData->groupId > pos->groupId) { @@ -985,8 +982,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) && inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) { updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, + numOfOutput); } doCloseWindow(pResultRowInfo, pInfo, pResult); @@ -1025,8 +1022,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, + numOfOutput); doCloseWindow(pResultRowInfo, pInfo, pResult); } @@ -1214,8 +1211,8 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, + pBlock->info.rows, numOfOutput); } static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { @@ -1418,7 +1415,7 @@ void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, TSDB_ORDER_ASC); doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]); if (pUpWins) { - SWinRes winRes = {.ts = win.skey, .groupId = groupIds[i]}; + SWinKey winRes = {.ts = win.skey, .groupId = groupIds[i]}; taosArrayPush(pUpWins, &winRes); } } @@ -1445,7 +1442,7 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* 
uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId; bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput); if (pUpWins && res) { - SWinRes winRes = {.ts = win.skey, .groupId = winGpId}; + SWinKey winRes = {.ts = win.skey, .groupId = winGpId}; taosArrayPush(pUpWins, &winRes); } getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win); @@ -1484,11 +1481,11 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, STimeWindow win; win.skey = ts; win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; - SWinRes winRe = { + SWinKey winRe = { .ts = win.skey, .groupId = groupId, }; - void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes)); + void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinKey)); if (isCloseWindow(&win, pSup)) { if (chIds && pPullDataMap) { SArray* chAy = *(SArray**)chIds; @@ -1555,7 +1552,7 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); for (int32_t i = *index; i < size; i++) { - SWinRes* pWin = taosArrayGet(pWins, i); + SWinKey* pWin = taosArrayGet(pWins, i); colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false); colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false); pBlock->info.rows++; @@ -1595,6 +1592,9 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + + SStreamState* pState = pTaskInfo->streamInfo.pState; + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -1639,6 +1639,35 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap); } +#if 0 + if (pState) { + printf(">>>>>>>> stream read backend\n"); + SWinKey key = { + .ts = 1, + .groupId = 2, + }; + char* val = NULL; + int32_t sz; + if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) { + ASSERT(0); + } + printf("stream read %s %d\n", val, sz); + streamFreeVal(val); + + SStreamStateCur* pCur = streamStateGetCur(pState, &key); + ASSERT(pCur); + while (streamStateCurNext(pState, pCur) == 0) { + SWinKey key1; + const void* val1; + if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) { + break; + } + printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz); + } + streamStateFreeCur(pCur); + } +#endif + pOperator->status = OP_RES_TO_RETURN; closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); @@ -1793,6 +1822,12 @@ void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSuppor pScanInfo->sessionSup.pIntervalAggSup = pSup; } +void initStreamFunciton(SqlFunctionCtx* pCtx, int32_t numOfExpr) { + for (int32_t i = 0; i < numOfExpr; i++) { + pCtx[i].isStream = true; + } +} + SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp* 
pTwAggSupp, SIntervalPhysiNode* pPhyNode, @@ -1839,6 +1874,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* if (isStream) { ASSERT(numOfCols > 0); increaseTs(pSup->pCtx); + initStreamFunciton(pSup->pCtx, pSup->numOfExprs); } initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); @@ -1855,7 +1891,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* } pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); - pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); pInfo->delIndex = 0; pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -1916,8 +1952,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator if (gid != pRowSup->groupId || pInfo->winSup.prevTs == INT64_MIN) { doKeepNewWindowStartInfo(pRowSup, tsList, j, gid); doKeepTuple(pRowSup, tsList[j], gid); - } else if ((tsList[j] - pRowSup->prevTs >= 0) && tsList[j] - pRowSup->prevTs <= gap || - (pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap)) { + } else if (((tsList[j] - pRowSup->prevTs >= 0) && (tsList[j] - pRowSup->prevTs <= gap)) || + ((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) { // The gap is less than the threshold, so it belongs to current session window that has been opened already. doKeepTuple(pRowSup, tsList[j], gid); if (j == 0 && pRowSup->startRowIndex != 0) { @@ -1956,8 +1992,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, + pBlock->info.rows, numOfOutput); } static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { @@ -2822,7 +2858,7 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr return; } for (int32_t i = 0; i < size; i++) { - SWinRes* pWinRes = taosArrayGet(pWinArray, i); + SWinKey* pWinRes = taosArrayGet(pWinArray, i); SResultRow* pCurResult = NULL; STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts + 1}; setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx, @@ -2865,12 +2901,12 @@ int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC); } -void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) { +void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) { SArray* childIds = taosArrayInit(8, sizeof(int32_t)); for (int32_t i = 0; i < size; i++) { taosArrayPush(childIds, &i); } - taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*)); + taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*)); } static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; } @@ -2917,11 +2953,11 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc } if (IS_FINAL_OP(pInfo) && isClosed && pInfo->pChildren) { bool ignore = true; - SWinRes winRes = { + SWinKey winRes = { .ts = nextWin.skey, .groupId = tableGroupId, }; - void* chIds = 
taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes)); + void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey)); if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) { SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId}; // add pull data request @@ -3049,8 +3085,8 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { uint64_t* groupIdData = (uint64_t*)pGroupCol->pData; int32_t chId = getChildIndex(pBlock); for (int32_t i = 0; i < pBlock->info.rows; i++) { - SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; - void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes)); + SWinKey winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; + void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey)); if (chIds) { SArray* chArray = *(SArray**)chIds; int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ); @@ -3059,7 +3095,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { taosArrayRemove(chArray, index); if (taosArrayGetSize(chArray) == 0) { // pull data is over - taosHashRemove(pMap, &winRes, sizeof(SWinRes)); + taosHashRemove(pMap, &winRes, sizeof(SWinKey)); } } } @@ -3139,11 +3175,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { maxTs = TMAX(maxTs, pBlock->info.window.ekey); maxTs = TMAX(maxTs, pBlock->info.watermark); - if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA || - pBlock->info.type == STREAM_INVALID) { + ASSERT(pBlock->info.type != STREAM_INVERT); + if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) { pInfo->binfo.pRes->info.type = pBlock->info.type; } else if (pBlock->info.type == STREAM_CLEAR) { - SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); @@ -3181,7 +3217,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap); continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { - SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); removeResults(pUpWins, pUpdatedMap); taosArrayDestroy(pUpWins); @@ -3350,6 +3386,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, goto _error; } + initStreamFunciton(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs); initBasicInfo(&pInfo->binfo, pResBlock); ASSERT(numOfCols > 0); @@ -3397,7 +3434,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired; pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); pInfo->delIndex = 0; - pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); pOperator->operatorType = pPhyNode->type; @@ -3489,6 +3526,7 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* if (code != TSDB_CODE_SUCCESS) { return code; } + initStreamFunciton(pSup->pCtx, pSup->numOfExprs); initBasicInfo(pBasicInfo, pResultBlock); @@ -3731,8 +3769,8 @@ int32_t 
updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS } if (pWinInfo->win.skey > pStartTs[i]) { if (pStDeleted && pWinInfo->isOutput) { - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->isOutput = false; } pWinInfo->win.skey = pStartTs[i]; @@ -3850,8 +3888,8 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo); taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition)); if (pWinInfo->isOutput) { - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->isOutput = false; } taosArrayRemove(pInfo->streamAggSup.pCurWins, i); @@ -3913,8 +3951,8 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData } pCurWin->isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) { - SWinRes value = {.ts = pCurWin->win.skey, .groupId = groupId}; - code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); + SWinKey value = {.ts = pCurWin->win.skey, .groupId = groupId}; + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey)); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -3961,11 +3999,12 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup, int32_t numOfOutput, int64_t gap, SArray* result) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; + SColumnInfoData* pGpDataInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); + uint64_t* gpCols = (uint64_t*)pGpDataInfo->pData; int32_t step = 0; for (int32_t i = 0; i < pBlock->info.rows; i += step) { int32_t winIndex = 0; - SResultWindowInfo* pCurWin = - getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, pBlock->info.groupId, gap, &winIndex); + SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex); if (!pCurWin || pCurWin->pos.pageId == -1) { // window has been closed. 
step = 1; @@ -3990,9 +4029,9 @@ static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated) { if (pos == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - pos->groupId = ((SWinRes*)pData)->groupId; + pos->groupId = ((SWinKey*)pData)->groupId; pos->pos = *(SResultRowPosition*)key; - *(int64_t*)pos->key = ((SWinRes*)pData)->ts; + *(int64_t*)pos->key = ((SWinKey*)pData)->ts; taosArrayPush(pUpdated, &pos); } taosArraySort(pUpdated, resultrowComparAsc); @@ -4008,7 +4047,7 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It blockDataEnsureCapacity(pBlock, size); size_t keyLen = 0; while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) { - SWinRes* res = *Ite; + SWinKey* res = *Ite; SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false); SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); @@ -4139,8 +4178,8 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) { int32_t size = taosArrayGetSize(pResWins); for (int32_t i = 0; i < size; i++) { SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i); - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); } } @@ -4178,14 +4217,14 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); - doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, 0, pOperator->exprSupp.numOfExprs, 0, - pWins); + doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX, + pOperator->exprSupp.numOfExprs, 0, pWins); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info; - doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, 0, pChildOp->exprSupp.numOfExprs, - 0, NULL); + doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX, + pChildOp->exprSupp.numOfExprs, 0, NULL); rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator); } taosArrayDestroy(pWins); @@ -4293,21 +4332,21 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { } else if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); if (pBInfo->pRes->info.rows > 0) { - printDataBlock(pBInfo->pRes, "sems session"); + printDataBlock(pBInfo->pRes, "semi session"); return pBInfo->pRes; } // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) { pInfo->returnDelete = true; - printDataBlock(pInfo->pDelRes, "sems session"); + printDataBlock(pInfo->pDelRes, "semi session"); return pInfo->pDelRes; } if (pInfo->pUpdateRes->info.rows > 0) { // process the rest of the data pOperator->status = OP_OPENED; - printDataBlock(pInfo->pUpdateRes, "sems session"); + printDataBlock(pInfo->pUpdateRes, "semi session"); return pInfo->pUpdateRes; } // semi interval 
operator clear disk buffer @@ -4326,13 +4365,14 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { clearSpecialDataBlock(pInfo->pUpdateRes); break; } + printDataBlock(pBlock, "semi session recv"); if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); - doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, 0, pSup->numOfExprs, 0, pWins); + doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, START_TS_COLUMN_INDEX, pSup->numOfExprs, 0, pWins); removeSessionResults(pStUpdated, pWins); taosArrayDestroy(pWins); - copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex); + copyDataBlock(pInfo->pUpdateRes, pBlock); break; } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) { // gap must be 0 @@ -4372,21 +4412,21 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); if (pBInfo->pRes->info.rows > 0) { - printDataBlock(pBInfo->pRes, "sems session"); + printDataBlock(pBInfo->pRes, "semi session"); return pBInfo->pRes; } // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) { pInfo->returnDelete = true; - printDataBlock(pInfo->pDelRes, "sems session"); + printDataBlock(pInfo->pDelRes, "semi session"); return pInfo->pDelRes; } if (pInfo->pUpdateRes->info.rows > 0) { // process the rest of the data pOperator->status = OP_OPENED; - printDataBlock(pInfo->pUpdateRes, "sems session"); + printDataBlock(pInfo->pUpdateRes, "semi session"); return pInfo->pUpdateRes; } @@ -4408,8 +4448,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream pOperator->name = "StreamSessionFinalAggOperator"; } else { pInfo->isFinal = false; - pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - pInfo->pUpdateRes->info.type = STREAM_CLEAR; + pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR); blockDataEnsureCapacity(pInfo->pUpdateRes, 128); pOperator->name = "StreamSessionSemiAggOperator"; pOperator->fpSet = @@ -4586,8 +4625,9 @@ SStateWindowInfo* getStateWindow(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_ return insertNewStateWindow(pWinInfos, ts, pKeyData, index + 1, pCol); } -int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, SColumnInfoData* pKeyCol, int32_t rows, - int32_t start, bool* allEqual, SHashObj* pSeDelete) { +int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, uint64_t groupId, + SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, + SHashObj* pSeDeleted) { *allEqual = true; SStateWindowInfo* pWinInfo = taosArrayGet(pWinInfos, winIndex); for (int32_t i = start; i < rows; ++i) { @@ -4607,9 +4647,9 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, S } } if (pWinInfo->winInfo.win.skey > pTs[i]) { - if (pSeDelete && pWinInfo->winInfo.isOutput) { - taosHashPut(pSeDelete, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &pWinInfo->winInfo.win.skey, - sizeof(TSKEY)); + if (pSeDeleted && pWinInfo->winInfo.isOutput) { + SWinKey res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId}; + taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->winInfo.isOutput = false; } pWinInfo->winInfo.win.skey = pTs[i]; @@ -4622,23 +4662,21 @@ int32_t 
updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, S return rows - start; } -static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, int32_t tsIndex, SColumn* pCol, - int32_t keyIndex, SHashObj* pSeUpdated, SHashObj* pSeDeleted) { - SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); - SColumnInfoData* pKeyColInfo = taosArrayGet(pBlock->pDataBlock, keyIndex); +static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SHashObj* pSeUpdated, + SHashObj* pSeDeleted) { + SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); + SColumnInfoData* pGroupColInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); TSKEY* tsCol = (TSKEY*)pTsColInfo->pData; bool allEqual = false; int32_t step = 1; + uint64_t* gpCol = (uint64_t*)pGroupColInfo->pData; for (int32_t i = 0; i < pBlock->info.rows; i += step) { - char* pKeyData = colDataGetData(pKeyColInfo, i); int32_t winIndex = 0; - SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], pBlock->info.groupId, &winIndex); + SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], gpCol[i], &winIndex); if (!pCurWin) { continue; } - step = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCol, pKeyColInfo, pBlock->info.rows, i, &allEqual, - pSeDeleted); - ASSERT(isTsInWindow(pCurWin, tsCol[i]) || isEqualStateKey(pCurWin, pKeyData)); + updateSessionWindowInfo(&pCurWin->winInfo, tsCol, NULL, 0, pBlock->info.rows, i, 0, NULL); taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition)); deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo); } @@ -4675,13 +4713,12 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl char* pKeyData = colDataGetData(pKeyColInfo, i); int32_t winIndex = 0; bool allEqual = true; - SStateWindowInfo* pCurWin = - getStateWindow(pAggSup, tsCols[i], pSDataBlock->info.groupId, pKeyData, &pInfo->stateCol, &winIndex); - winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, pKeyColInfo, pSDataBlock->info.rows, i, - &allEqual, pInfo->pSeDeleted); + SStateWindowInfo* pCurWin = getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex); + winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, pSDataBlock->info.rows, + i, &allEqual, pStDeleted); if (!allEqual) { - appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, - &pSDataBlock->info.groupId); + appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, GROUPID_COLUMN_INDEX, + &groupId); taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition)); deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo); continue; @@ -4692,8 +4729,8 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl } pCurWin->winInfo.isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { - SWinRes value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; - code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); + SWinKey value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; + code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey)); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -4736,8 +4773,7 @@ static SSDataBlock* 
doStreamStateAgg(SOperatorInfo* pOperator) { printDataBlock(pBlock, "single state recv"); if (pBlock->info.type == STREAM_CLEAR) { - doClearStateWindows(&pInfo->streamAggSup, pBlock, pInfo->primaryTsIndex, &pInfo->stateCol, pInfo->stateCol.slotId, - pSeUpdated, pInfo->pSeDeleted); + doClearStateWindows(&pInfo->streamAggSup, pBlock, pSeUpdated, pInfo->pSeDeleted); continue; } else if (pBlock->info.type == STREAM_DELETE_DATA) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); @@ -4845,9 +4881,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pSeDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); pInfo->pDelIterator = NULL; - // pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); - pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete - pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete + pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); pInfo->pChildren = NULL; pInfo->ignoreExpiredData = pStateNode->window.igExpired; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 5051dcd65c..013c58cc45 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -468,7 +468,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0); + pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; char* in = GET_ROWCELL_INTERBUF(pResInfo); colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); @@ -498,7 +498,7 @@ int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0);; + pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; char* in = finalResult; colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); @@ -663,8 +663,7 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) { // check for overflow if (IS_FLOAT_TYPE(type) && (isinf(pSumRes->dsum) || isnan(pSumRes->dsum))) { - GET_RES_INFO(pCtx)->isNullRes = 1; - numOfElem = 1; + numOfElem = 0; } _sum_over: @@ -791,8 +790,7 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { int32_t numOfRows = pInput->numOfRows; if (IS_NULL_TYPE(type)) { - GET_RES_INFO(pCtx)->isNullRes = 1; - numOfElem = 1; + numOfElem = 0; goto _avg_over; } @@ -1613,7 +1611,7 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t currentRow = pBlock->info.rows; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); - pEntryInfo->isNullRes = (pEntryInfo->isNullRes == 1) ? 1 : (pEntryInfo->numOfRes == 0); + pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 
1 : 0; if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) { float v = *(double*)&pRes->v; @@ -1792,8 +1790,7 @@ int32_t stddevFunction(SqlFunctionCtx* pCtx) { int32_t numOfRows = pInput->numOfRows; if (IS_NULL_TYPE(type)) { - GET_RES_INFO(pCtx)->isNullRes = 1; - numOfElem = 1; + numOfElem = 0; goto _stddev_over; } diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 21aeaba70b..75844ce76f 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -255,6 +255,13 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx if (node->opType == OP_TYPE_JSON_GET_VALUE) { return code; } + if ((node->pLeft != NULL && nodeType(node->pLeft) == QUERY_NODE_COLUMN) && + (node->pRight != NULL && nodeType(node->pRight) == QUERY_NODE_VALUE)) { + SColumnNode *cn = (SColumnNode *)(node->pLeft); + if (cn->node.resType.type == TSDB_DATA_TYPE_JSON) { + SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + } SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam)); if (NULL == paramList) { diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index f8ba6e6901..d13057a93e 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -817,6 +817,7 @@ void nodesDestroyNode(SNode* pNode) { destroyLogicNode((SLogicNode*)pLogicNode); nodesDestroyNode(pLogicNode->pWStartTs); nodesDestroyNode(pLogicNode->pValues); + nodesDestroyList(pLogicNode->pFillExprs); break; } case QUERY_NODE_LOGIC_PLAN_SORT: { diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 0922cdb6b9..4e32672697 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -125,6 +125,37 @@ static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) { return TSDB_CODE_SUCCESS; } +static char* tableNameGetPosition(SToken* pToken, char target) { + bool inEscape = false; + bool inQuote = false; + char quotaStr = 0; + + for (uint32_t i = 0; i < pToken->n; ++i) { + if (*(pToken->z + i) == target && (!inEscape) && (!inQuote)) { + return pToken->z + i; + } + + if (*(pToken->z + i) == TS_ESCAPE_CHAR) { + if (!inQuote) { + inEscape = !inEscape; + } + } + + if (*(pToken->z + i) == '\'' || *(pToken->z + i) == '"') { + if (!inEscape) { + if (!inQuote) { + quotaStr = *(pToken->z + i); + inQuote = !inQuote; + } else if (quotaStr == *(pToken->z + i)) { + inQuote = !inQuote; + } + } + } + } + + return NULL; +} + static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, const char* dbName, SMsgBuf* pMsgBuf) { const char* msg1 = "name too long"; const char* msg2 = "invalid database name"; @@ -132,7 +163,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con const char* msg4 = "invalid table name"; int32_t code = TSDB_CODE_SUCCESS; - char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true); + char* p = tableNameGetPosition(pTableName, TS_PATH_DELIMITER[0]); if (p != NULL) { // db has been specified in sql string so we ignore current db path assert(*p == TS_PATH_DELIMITER[0]); @@ -681,6 +712,11 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo* break; } + char tmpTokenBuf[TSDB_COL_NAME_LEN + 2] = {0}; // used for deleting Escape character backstick(`) + strncpy(tmpTokenBuf, sToken.z, sToken.n); + sToken.z = tmpTokenBuf; + sToken.n = strdequote(sToken.z); + col_id_t t = lastColIdx + 1; col_id_t index = findCol(&sToken, t, nCols, pSchema); if 
(index < 0 && t > 0) { @@ -1686,9 +1722,17 @@ static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, i return TSDB_CODE_SUCCESS; } +static int32_t checkTableName(const char* pTableName, SMsgBuf* pMsgBuf) { + if (NULL != strchr(pTableName, '.')) { + return generateSyntaxErrMsgExt(pMsgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, "The table name cannot contain '.'"); + } + return TSDB_CODE_SUCCESS; +} + static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) { SName name; CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); + CHECK_CODE(checkTableName(name.tname, &pCxt->msg)); CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache)); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 3c0d9a5f63..8a1d8763bf 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1881,6 +1881,12 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { return rewriteExprToGroupKeyFunc(pCxt, pNode); } } + if (NULL != pSelect->pWindow && QUERY_NODE_STATE_WINDOW == nodeType(pSelect->pWindow)) { + if (nodesEqualNode(((SStateWindowNode*)pSelect->pWindow)->pExpr, *pNode)) { + pSelect->hasStateKey = true; + return rewriteExprToGroupKeyFunc(pCxt, pNode); + } + } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt)); @@ -1973,7 +1979,7 @@ static int32_t checkWindowFuncCoexist(STranslateContext* pCxt, SSelectStmt* pSel if (NULL == pSelect->pWindow) { return TSDB_CODE_SUCCESS; } - if (NULL != pSelect->pWindow && !pSelect->hasAggFuncs) { + if (NULL != pSelect->pWindow && !pSelect->hasAggFuncs && !pSelect->hasStateKey) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN); } return TSDB_CODE_SUCCESS; @@ -2588,8 +2594,13 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi return code; } -static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval) { +static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval, + bool isInterpFill) { if (FILL_MODE_NONE == pFill->mode) { + if (isInterpFill) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Unsupported fill type"); + } + return TSDB_CODE_SUCCESS; } @@ -2629,7 +2640,7 @@ static int32_t translateFill(STranslateContext* pCxt, SSelectStmt* pSelect, SInt } ((SFillNode*)pInterval->pFill)->timeRange = pSelect->timeRange; - return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval); + return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval, false); } static int64_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char unit) { @@ -2825,7 +2836,7 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) { static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) { int32_t len = strlen(pInterval->literal); - char *unit = &pInterval->literal[len - 1]; + char* unit = &pInterval->literal[len - 1]; if (*unit == 'n' || *unit == 'y') { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Unsupported time unit in EVERY clause"); @@ -2837,7 
+2848,7 @@ static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) { static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) { int32_t code = TSDB_CODE_SUCCESS; - code = checkEvery(pCxt, (SValueNode *)(*pEvery)); + code = checkEvery(pCxt, (SValueNode*)(*pEvery)); if (TSDB_CODE_SUCCESS == code) { code = translateExpr(pCxt, pEvery); } @@ -2858,7 +2869,7 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect code = getQueryTimeRange(pCxt, pSelect->pRange, &(((SFillNode*)pSelect->pFill)->timeRange)); } if (TSDB_CODE_SUCCESS == code) { - code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery); + code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true); } return code; diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 17e78e7806..32513fd0b6 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -1159,6 +1159,16 @@ void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) { taosHashCleanup(pMetaCache->pTableMeta); taosHashCleanup(pMetaCache->pTableVgroup); } + SInsertTablesMetaReq* p = taosHashIterate(pMetaCache->pInsertTables, NULL); + while (NULL != p) { + taosArrayDestroy(p->pTableMetaPos); + taosArrayDestroy(p->pTableMetaReq); + taosArrayDestroy(p->pTableVgroupPos); + taosArrayDestroy(p->pTableVgroupReq); + + p = taosHashIterate(pMetaCache->pInsertTables, p); + } + taosHashCleanup(pMetaCache->pInsertTables); taosHashCleanup(pMetaCache->pDbVgroup); taosHashCleanup(pMetaCache->pDbCfg); taosHashCleanup(pMetaCache->pDbInfo); diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 71f084d412..0667c5f5b9 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -44,12 +44,15 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) { pCol->colType = COLUMN_TYPE_TBNAME; break; case FUNCTION_TYPE_WSTART: + pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; + pCol->colType = COLUMN_TYPE_WINDOW_START; + break; case FUNCTION_TYPE_WEND: pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; - pCol->colType = COLUMN_TYPE_WINDOW_PC; + pCol->colType = COLUMN_TYPE_WINDOW_END; break; case FUNCTION_TYPE_WDURATION: - pCol->colType = COLUMN_TYPE_WINDOW_PC; + pCol->colType = COLUMN_TYPE_WINDOW_DURATION; break; case FUNCTION_TYPE_GROUP_KEY: pCol->colType = COLUMN_TYPE_GROUP_KEY; @@ -784,7 +787,10 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele static EDealRes needFillValueImpl(SNode* pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (COLUMN_TYPE_WINDOW_PC != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { + if (COLUMN_TYPE_WINDOW_START != pCol->colType && + COLUMN_TYPE_WINDOW_END != pCol->colType && + COLUMN_TYPE_WINDOW_DURATION != pCol->colType && + COLUMN_TYPE_GROUP_KEY != pCol->colType) { *(bool*)pContext = true; return DEAL_RES_END; } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 862d142100..f006096ce2 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -149,13 +149,10 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { } } +_return: + taosArrayDestroy(pResList); QW_RET(code); - -_return: - taosArrayDestroy(pResList); - - return code; } int32_t qwGenerateSchHbRsp(SQWorker *mgmt, SQWSchStatus *sch, SQWHbInfo 
*hbInfo) { diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 1b3d75f33b..957fd46ba5 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -60,8 +60,7 @@ typedef enum { #define SCH_DEFAULT_TASK_TIMEOUT_USEC 10000000 #define SCH_MAX_TASK_TIMEOUT_USEC 60000000 #define SCH_DEFAULT_MAX_RETRY_NUM 6 - -#define SCH_ASYNC_LAUNCH_TASK 0 +#define SCH_MIN_AYSNC_EXEC_NUM 3 typedef struct SSchDebug { bool lockEnable; @@ -284,7 +283,7 @@ typedef struct SSchJob { } SSchJob; typedef struct SSchTaskCtx { - SSchJob *pJob; + int64_t jobRid; SSchTask *pTask; } SSchTaskCtx; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 612b908d41..9cab39c301 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -821,7 +821,13 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) { int32_t schLaunchTaskImpl(void *param) { SSchTaskCtx *pCtx = (SSchTaskCtx *)param; - SSchJob *pJob = pCtx->pJob; + SSchJob *pJob = schAcquireJob(pCtx->jobRid); + if (NULL == pJob) { + taosMemoryFree(param); + qDebug("job refId 0x%" PRIx64 " already not exist", pCtx->jobRid); + SCH_RET(TSDB_CODE_SCH_JOB_IS_DROPPING); + } + SSchTask *pTask = pCtx->pTask; int8_t status = 0; int32_t code = 0; @@ -871,14 +877,16 @@ _return: taosMemoryFree(param); -#if SCH_ASYNC_LAUNCH_TASK - if (code) { - code = schProcessOnTaskFailure(pJob, pTask, code); + if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { + if (code) { + code = schProcessOnTaskFailure(pJob, pTask, code); + } + if (code) { + code = schHandleJobFailure(pJob, code); + } } - if (code) { - code = schHandleJobFailure(pJob, code); - } -#endif + + schReleaseJob(pJob->refId); SCH_RET(code); } @@ -890,15 +898,15 @@ int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - param->pJob = pJob; + param->jobRid = pJob->refId; param->pTask = pTask; -#if SCH_ASYNC_LAUNCH_TASK - taosAsyncExec(schLaunchTaskImpl, param, NULL); -#else - SCH_ERR_RET(schLaunchTaskImpl(param)); -#endif - + if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { + taosAsyncExec(schLaunchTaskImpl, param, NULL); + } else { + SCH_ERR_RET(schLaunchTaskImpl(param)); + } + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index c78ff0756f..9d4010f60e 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -358,7 +358,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat FAIL_SHUFFLE_DISPATCH: if (pReqs) { for (int32_t i = 0; i < vgSz; i++) { - taosArrayDestroy(pReqs[i].data); + taosArrayDestroyP(pReqs[i].data, taosMemoryFree); taosArrayDestroy(pReqs[i].dataLen); } taosMemoryFree(pReqs); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 06ca26f029..102bad7426 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -140,7 +140,6 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch) return 0; } -// TODO: handle version int32_t streamExecForAll(SStreamTask* pTask) { while (1) { int32_t batchCnt = 1; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 5ff700546c..20a2f7d332 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -14,7 +14,7 @@ */ #include "executor.h" -#include "tstream.h" 
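/*
 * Illustrative sketch (not part of the patch series): the schTask.c hunk above replaces
 * the raw SSchJob pointer in SSchTaskCtx with a job reference id (jobRid) and brackets
 * task execution with schAcquireJob/schReleaseJob, so a task that fires after its job
 * was dropped fails the lookup instead of dereferencing freed memory. A minimal,
 * self-contained version of that lookup-by-id pattern is shown below; every name in it
 * (Job, jobAcquire, jobRelease, taskLaunch) is hypothetical and only stands in for the
 * TDengine-specific machinery.
 */
#include <inttypes.h>
#include <stdio.h>

typedef struct Job {
  int64_t refId;    /* stable identifier handed to asynchronously launched tasks */
  int32_t refCount; /* number of callers currently holding the job */
  int32_t dropped;  /* set when the job is being torn down */
} Job;

#define MAX_JOBS 16
static Job *jobTable[MAX_JOBS]; /* toy registry keyed by refId % MAX_JOBS */

/* Resolve the id and pin the job; returns NULL if the job no longer exists. */
static Job *jobAcquire(int64_t refId) {
  Job *pJob = jobTable[refId % MAX_JOBS];
  if (pJob == NULL || pJob->dropped || pJob->refId != refId) return NULL;
  pJob->refCount++;
  return pJob;
}

/* Drop the pin taken by jobAcquire. */
static void jobRelease(Job *pJob) {
  if (pJob != NULL && pJob->refCount > 0) pJob->refCount--;
}

/* Async entry point: look the job up first and bail out cleanly if it is gone. */
static int32_t taskLaunch(int64_t jobRefId) {
  Job *pJob = jobAcquire(jobRefId);
  if (pJob == NULL) {
    printf("job 0x%" PRIx64 " already gone, skip launch\n", (uint64_t)jobRefId);
    return -1;
  }
  /* ... run the task against pJob here ... */
  jobRelease(pJob);
  return 0;
}

/* Tiny usage demo: launch once while the job is alive, once after it is dropped. */
int main(void) {
  Job job = {.refId = 7, .refCount = 0, .dropped = 0};
  jobTable[job.refId % MAX_JOBS] = &job;

  taskLaunch(7);   /* succeeds: job is registered and alive */
  job.dropped = 1; /* simulate the job being torn down */
  taskLaunch(7);   /* fails the lookup instead of touching a dead job */
  return 0;
}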
+#include "streamInc.h" #include "ttimer.h" SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) { @@ -23,17 +23,23 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - pMeta->path = strdup(path); + int32_t len = strlen(path) + 20; + char* streamPath = taosMemoryCalloc(1, len); + sprintf(streamPath, "%s/%s", path, "stream"); + pMeta->path = strdup(streamPath); if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db) < 0) { goto _err; } + sprintf(streamPath, "%s/%s", pMeta->path, "checkpoints"); + mkdir(streamPath, 0755); + taosMemoryFree(streamPath); + if (tdbTbOpen("task.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pTaskDb) < 0) { goto _err; } - // open state storage backend - if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pStateDb) < 0) { + if (tdbTbOpen("checkpoint.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pCheckpointDb) < 0) { goto _err; } @@ -49,16 +55,13 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF pMeta->ahandle = ahandle; pMeta->expandFunc = expandFunc; - if (streamLoadTasks(pMeta) < 0) { - goto _err; - } return pMeta; _err: if (pMeta->path) taosMemoryFree(pMeta->path); if (pMeta->pTasks) taosHashCleanup(pMeta->pTasks); - if (pMeta->pStateDb) tdbTbClose(pMeta->pStateDb); if (pMeta->pTaskDb) tdbTbClose(pMeta->pTaskDb); + if (pMeta->pCheckpointDb) tdbTbClose(pMeta->pCheckpointDb); if (pMeta->db) tdbClose(pMeta->db); taosMemoryFree(pMeta); return NULL; @@ -67,7 +70,7 @@ _err: void streamMetaClose(SStreamMeta* pMeta) { tdbCommit(pMeta->db, &pMeta->txn); tdbTbClose(pMeta->pTaskDb); - tdbTbClose(pMeta->pStateDb); + tdbTbClose(pMeta->pCheckpointDb); tdbClose(pMeta->db); void* pIter = NULL; diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 263053778b..0505c3edd6 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -176,6 +176,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea } int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 0 void* buf = NULL; ASSERT(pTask->taskLevel == TASK_LEVEL__SINK); @@ -224,10 +225,12 @@ int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { FAIL: if (buf) taosMemoryFree(buf); return -1; +#endif return 0; } int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 0 void* pVal = NULL; int32_t vLen = 0; if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) { @@ -241,7 +244,7 @@ int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { pTask->nextCheckId = aggCheckpoint.checkpointId + 1; pTask->checkpointInfo = aggCheckpoint.checkpointVer; - +#endif return 0; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c new file mode 100644 index 0000000000..dfd6f012cc --- /dev/null +++ b/source/libs/stream/src/streamState.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "executor.h" +#include "streamInc.h" +#include "tcommon.h" +#include "ttimer.h" + +SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { + SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); + if (pState == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + char statePath[300]; + sprintf(statePath, "%s/%d", path, pTask->taskId); + if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) { + goto _err; + } + + // open state storage backend + if (tdbTbOpen("state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pStateDb) < 0) { + goto _err; + } + + if (streamStateBegin(pState) < 0) { + goto _err; + } + + pState->pOwner = pTask; + + return pState; + +_err: + if (pState->pStateDb) tdbTbClose(pState->pStateDb); + if (pState->db) tdbClose(pState->db); + taosMemoryFree(pState); + return NULL; +} + +void streamStateClose(SStreamState* pState) { + tdbCommit(pState->db, &pState->txn); + tdbTbClose(pState->pStateDb); + tdbClose(pState->db); + + taosMemoryFree(pState); +} + +int32_t streamStateBegin(SStreamState* pState) { + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + + if (tdbBegin(pState->db, &pState->txn) < 0) { + tdbTxnClose(&pState->txn); + return -1; + } + return 0; +} + +int32_t streamStateCommit(SStreamState* pState) { + if (tdbCommit(pState->db, &pState->txn) < 0) { + return -1; + } + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStateAbort(SStreamState* pState) { + if (tdbAbort(pState->db, &pState->txn) < 0) { + return -1; + } + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn); +} +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { + return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen); +} + +int32_t streamStateDel(SStreamState* pState, const SWinKey* key) { + return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn); +} + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) return NULL; + tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL); + + int32_t c; + tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c); + if (c != 0) { + taosMemoryFree(pCur); + return NULL; + } + return pCur; +} + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { + const SWinKey* pKTmp = NULL; + int32_t kLen; + if (tdbTbcGet(pCur->pCur, (const void**)&pKTmp, &kLen, pVal, pVLen) < 0) { + return -1; + } + *pKey = *pKTmp; + return 0; +} + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToFirst(pCur->pCur); +} + +int32_t 
streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToLast(pCur->pCur); +} + +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c > 0) return pCur; + + if (tdbTbcMoveToNext(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c < 0) return pCur; + + if (tdbTbcMoveToPrev(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToNext(pCur->pCur); +} + +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToPrev(pCur->pCur); +} +void streamStateFreeCur(SStreamStateCur* pCur) { + tdbTbcClose(pCur->pCur); + taosMemoryFree(pCur); +} + +void streamFreeVal(void* val) { tdbFree(val); } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 4009a47c65..ce5917de29 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -165,5 +165,8 @@ void tFreeSStreamTask(SStreamTask* pTask) { if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos); } + + if (pTask->pState) streamStateClose(pTask->pState); + taosMemoryFree(pTask); } diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 0dc67cf150..6fb558e45c 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -28,10 +28,10 @@ extern "C" { #include "syncMessage.h" #include "taosdef.h" -#define SYNC_SNAPSHOT_SEQ_INVALID -1 +#define SYNC_SNAPSHOT_SEQ_INVALID -1 #define SYNC_SNAPSHOT_SEQ_FORCE_CLOSE -2 -#define SYNC_SNAPSHOT_SEQ_BEGIN 0 -#define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF +#define SYNC_SNAPSHOT_SEQ_BEGIN 0 +#define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF #define SYNC_SNAPSHOT_RETRY_MS 5000 @@ -40,14 +40,14 @@ typedef struct SSyncSnapshotSender { bool start; int32_t seq; int32_t ack; - void * pReader; - void * pCurrentBlock; + void *pReader; + void *pCurrentBlock; int32_t blockLen; SSnapshotParam snapshotParam; SSnapshot snapshot; SSyncCfg lastConfig; int64_t sendingMS; - SSyncNode * pSyncNode; + SSyncNode *pSyncNode; int32_t replicaIndex; SyncTerm term; SyncTerm privateTerm; @@ -64,20 +64,20 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender); int32_t snapshotReSend(SSyncSnapshotSender *pSender); cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender); -char * snapshotSender2Str(SSyncSnapshotSender *pSender); -char * snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event); +char *snapshotSender2Str(SSyncSnapshotSender *pSender); +char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event); //--------------------------------------------------- typedef struct SSyncSnapshotReceiver { bool start; int32_t ack; - void * pWriter; + void *pWriter; SyncTerm term; SyncTerm privateTerm; SSnapshotParam 
snapshotParam; SSnapshot snapshot; SRaftId fromId; - SSyncNode * pSyncNode; + SSyncNode *pSyncNode; } SSyncSnapshotReceiver; @@ -86,10 +86,11 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver); bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver); +void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver); cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver); -char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); -char * snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event); +char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); +char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event); //--------------------------------------------------- // on message diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 3fe600ecbb..51098374b0 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2181,6 +2181,11 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { (pMySender->privateTerm) += 100; } + // close receiver + if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { + snapshotReceiverForceStop(pSyncNode->pNewNodeReceiver); + } + // stop elect timer syncNodeStopElectTimer(pSyncNode); diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index 5de21bceca..ab404d1b9a 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -171,7 +171,7 @@ SRaftCfg *raftCfgOpen(const char *path) { taosLSeekFile(pCfg->pFile, 0, SEEK_SET); - char buf[1024] = {0}; + char buf[CONFIG_FILE_LEN] = {0}; int len = taosReadFile(pCfg->pFile, buf, sizeof(buf)); ASSERT(len > 0); diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 702e9f01dc..5489a107e7 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -24,7 +24,6 @@ //---------------------------------- static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg); static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); -static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver); static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); @@ -374,14 +373,14 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { char *snapshotSender2Str(SSyncSnapshotSender *pSender) { cJSON *pJson = snapshotSender2Json(pSender); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) { int32_t len = 256; - char * s = taosMemoryMalloc(len); + char *s = taosMemoryMalloc(len); SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; char host[64]; @@ -480,7 +479,7 @@ static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapsh } // force stop -static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) { +void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) { // force close, abandon incomplete data if (pReceiver->pWriter != NULL) { int32_t ret = 
pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false, @@ -653,7 +652,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { cJSON_AddStringToObject(pFromId, "addr", u64buf); { uint64_t u64 = pReceiver->fromId.addr; - cJSON * pTmp = pFromId; + cJSON *pTmp = pFromId; char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); @@ -686,14 +685,14 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { cJSON *pJson = snapshotReceiver2Json(pReceiver); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) { int32_t len = 256; - char * s = taosMemoryMalloc(len); + char *s = taosMemoryMalloc(len); SRaftId fromId = pReceiver->fromId; char host[128]; diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 7a44edb12c..5430acb972 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -1401,7 +1401,7 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD pDecoder->pgno = 0; TDB_CELLDECODER_SET_FREE_NIL(pDecoder); - tdbDebug("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV); + // tdbTrace("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV); // 1. Decode header part if (!leaf) { diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c index ab9b21dc3f..527ad200d4 100644 --- a/source/libs/tdb/src/db/tdbPCache.c +++ b/source/libs/tdb/src/db/tdbPCache.c @@ -145,7 +145,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) // 1. 
Search the hash table pPage = pCache->pgHash[tdbPCachePageHash(pPgid) % pCache->nHash]; while (pPage) { - if (memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0 && pPage->pgid.pgno == pPgid->pgno) break; + if (pPage->pgid.pgno == pPgid->pgno && memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0) break; pPage = pPage->pHashNext; } @@ -243,7 +243,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable--; // printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("pin page %d", pPage->id); + tdbDebug("pin page %d", pPage->id); } } @@ -264,30 +264,33 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable++; // printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("unpin page %d", pPage->id); + tdbDebug("unpin page %d", pPage->id); } static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) { - SPage **ppPage; - uint32_t h; - - h = tdbPCachePageHash(&(pPage->pgid)); - for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext)) - ; + uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; + SPage **ppPage = &(pCache->pgHash[h]); + if (*ppPage == pPage) { + pCache->pgHash[h] = pPage->pHashNext; + } else { + for (; (*ppPage) && (*ppPage)->pHashNext != pPage; ppPage = &((*ppPage)->pHashNext)) + ; + if (*ppPage) { + (*ppPage)->pHashNext = pPage->pHashNext; + } + } if (*ppPage) { - *ppPage = pPage->pHashNext; - pCache->nPage--; + pPage->pHashNext = NULL; + --pCache->nPage; // printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); } - tdbTrace("remove page %d to hash", pPage->id); + tdbDebug("remove page %p/%d from hash", pPage, pPage->id); } static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { - int h; - - h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; + uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; pPage->pHashNext = pCache->pgHash[h]; pCache->pgHash[h] = pPage; @@ -295,7 +298,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { pCache->nPage++; // printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("add page %d to hash", pPage->id); + tdbDebug("add page %p/%d to hash", pPage, pPage->id); } static int tdbPCacheOpenImpl(SPCache *pCache) { diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c index 276b06b147..9e0cd76573 100644 --- a/source/libs/tdb/src/db/tdbPage.c +++ b/source/libs/tdb/src/db/tdbPage.c @@ -74,6 +74,7 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t) int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) { u8 *ptr; + tdbDebug("page/destroy: %p %p", pPage, xFree); ASSERT(xFree); for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) { @@ -87,6 +88,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) } void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) { + tdbDebug("page/zero: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize); pPage->pPageHdr = pPage->pData + szAmHdr; TDB_PAGE_NCELLS_SET(pPage, 0); TDB_PAGE_CCELLS_SET(pPage, pPage->pageSize - sizeof(SPageFtr)); @@ -103,6 +105,7 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell } void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const 
SPage *, SCell *, int, TXN *, SBTree *pBt)) { + tdbDebug("page/init: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize); pPage->pPageHdr = pPage->pData + szAmHdr; pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage); pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage); diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index 7cfb188ac9..98e0b8f9c9 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -23,6 +23,7 @@ #define HTTP_RECV_BUF_SIZE 1024 + typedef struct SHttpClient { uv_connect_t conn; uv_tcp_t tcp; @@ -143,9 +144,9 @@ static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) { SHttpClient* cli = handle->data; if (nread < 0) { - uError("http-report read error:%s", uv_err_name(nread)); + uError("http-report recv error:%s", uv_err_name(nread)); } else { - uInfo("http-report succ to read %d bytes, just ignore it", nread); + uTrace("http-report succ to recv %d bytes, just ignore it", nread); } uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); } @@ -155,7 +156,7 @@ static void clientSentCb(uv_write_t* req, int32_t status) { terrno = TAOS_SYSTEM_ERROR(status); uError("http-report failed to send data %s", uv_strerror(status)); } else { - uInfo("http-report succ to send data"); + uTrace("http-report succ to send data"); } uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb); } diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 447db76136..207b967923 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -276,14 +276,16 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { while (transReadComplete(pBuf)) { tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); if (true == pBuf->invalid || false == uvHandleReq(conn)) { - tError("%s conn %p read invalid packet", transLabel(pTransInst), conn); + tError("%s conn %p read invalid packet, received from %s, local info:%s", transLabel(pTransInst), conn, + conn->dst, conn->src); destroyConn(conn, true); return; } } return; } else { - tError("%s conn %p read invalid packet, exceed limit", transLabel(pTransInst), conn); + tError("%s conn %p read invalid packet, exceed limit, received from %s, local info:", transLabel(pTransInst), + conn, conn->dst, conn->src); destroyConn(conn, true); return; } @@ -649,7 +651,7 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) { pObj->workerIdx = (pObj->workerIdx + 1) % pObj->numOfThreads; - tTrace("new conntion accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx); + tTrace("new connection accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx); uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, uvOnPipeWriteCb); } else { diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index a8da680910..93ced912f8 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -121,7 +121,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) { if (found == NULL) { // file corrupted, no complete log // TODO delete and search in previous files - ASSERT(0); + /*ASSERT(0);*/ terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } @@ -221,7 +221,6 @@ int walCheckAndRepairMeta(SWal* pWal) { int code = walSaveMeta(pWal); if (code < 0) { - 
taosArrayDestroy(actualLog); return -1; } } diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 6c8e949b25..f9797f6319 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -203,10 +203,11 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { } int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { - struct stat fileStat; #ifdef WINDOWS - int32_t code = _stat(path, &fileStat); + struct _stati64 fileStat; + int32_t code = _stati64(path, &fileStat); #else + struct stat fileStat; int32_t code = stat(path, &fileStat); #endif if (code < 0) { @@ -440,10 +441,10 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset) #endif assert(pFile->fd >= 0); // Please check if you have closed the file. #ifdef WINDOWS - size_t pos = _lseek(pFile->fd, 0, SEEK_CUR); - _lseek(pFile->fd, offset, SEEK_SET); + size_t pos = _lseeki64(pFile->fd, 0, SEEK_CUR); + _lseeki64(pFile->fd, offset, SEEK_SET); int64_t ret = _read(pFile->fd, buf, count); - _lseek(pFile->fd, pos, SEEK_SET); + _lseeki64(pFile->fd, pos, SEEK_SET); #else int64_t ret = pread(pFile->fd, buf, count, offset); #endif @@ -493,7 +494,7 @@ int64_t taosLSeekFile(TdFilePtr pFile, int64_t offset, int32_t whence) { #endif assert(pFile->fd >= 0); // Please check if you have closed the file. #ifdef WINDOWS - int64_t ret = _lseek(pFile->fd, offset, whence); + int64_t ret = _lseeki64(pFile->fd, offset, whence); #else int64_t ret = lseek(pFile->fd, offset, whence); #endif @@ -637,7 +638,7 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in #ifdef WINDOWS - _lseek(pFileIn->fd, (int32_t)(*offset), 0); + _lseeki64(pFileIn->fd, *offset, 0); int64_t writeLen = 0; uint8_t buffer[_SEND_FILE_STEP_] = {0}; diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 3a75e18a7f..3aa3f4f29e 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -851,13 +851,12 @@ char *taosGetCmdlineByPID(int pid) { } void taosSetCoreDump(bool enable) { + if (!enable) return; #ifdef WINDOWS - // SetUnhandledExceptionFilter(exceptionHandler); - // SetUnhandledExceptionFilter(&FlCrashDump); + SetUnhandledExceptionFilter(exceptionHandler); + SetUnhandledExceptionFilter(&FlCrashDump); #elif defined(_TD_DARWIN_64) #else - if (!enable) return; - // 1. set ulimit -c unlimited struct rlimit rlim; struct rlimit rlim_new; diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c index dd61f7d225..f9f42aa103 100644 --- a/source/util/src/tcache.c +++ b/source/util/src/tcache.c @@ -702,7 +702,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { taosMsleep(50); } - uInfo("cache:%s will be cleaned up", pCacheObj->name); + uTrace("cache:%s will be cleaned up", pCacheObj->name); doCleanupDataCache(pCacheObj); } diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index e8f1f06ef1..ba877915b1 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -83,8 +83,8 @@ int32_t tsCompressInit() { if (lossyFloat == false && lossyDouble == false) return 0; tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor); - if (lossyFloat) uInfo("lossy compression float is opened. "); - if (lossyDouble) uInfo("lossy compression double is opened. "); + if (lossyFloat) uTrace("lossy compression float is opened. "); + if (lossyDouble) uTrace("lossy compression double is opened. 
"); return 1; } // exit call diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 50beba8a9b..eb70002680 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -298,7 +298,8 @@ int32_t taosGetQitem(STaosQall *qall, void **ppItem) { return num; } -void taosResetQitems(STaosQall *qall) { qall->current = qall->start; } +void taosResetQitems(STaosQall *qall) { qall->current = qall->start; } +int32_t taosQallItemSize(STaosQall *qall) { return qall->numOfItems; } STaosQset *taosOpenQset() { STaosQset *qset = taosMemoryCalloc(sizeof(STaosQset), 1); From dfe7a5e007cbf548a6c809c0474cdcf460798b92 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 25 Aug 2022 11:02:04 +0800 Subject: [PATCH 53/79] other:merge 3.0 --- CONTRIBUTING.md | 63 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3b1a66839d..5be84bec34 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,15 +1,64 @@ # Contributing -We appreciate contributions from all developers. Feel free to follow us, fork the repository, report bugs and even submit your code on GitHub. However, we would like developers to follow our guides to contribute for better corporation. +We appreciate contributions from all developers. Feel free to follow us, fork the repository, report bugs, and even submit your code on GitHub. However, we would like developers to follow the guidelines in this document to ensure effective cooperation. -## Report bugs +## Reporting a bug -Any users can report bugs to us through the [github issue tracker](https://github.com/taosdata/TDengine/issues). We appreciate a detailed description of the problem you met. It is better to provide the detailed steps on reproducing the bug. Otherwise, an appendix with log files generated by the bug is welcome. +- Any users can report bugs to us through the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. We would appreciate if you could provide **a detailed description** of the problem you encountered, including steps to reproduce it. -## Read the contributor license agreement +- Attaching log files caused by the bug is really appreciated. -It is required to agree the Contributor Licence Agreement(CLA) before a user submitting his/her code patch. Follow the [TaosData CLA](https://www.taosdata.com/en/contributor/) link to read through the agreement. +## Guidelines for committing code -## Submit your code +- You must agree to the **Contributor License Agreement(CLA) before submitting your code patch**. Follow the **[TAOSData CLA](https://cla-assistant.io/taosdata/TDengine)** link to read through and sign the agreement. If you do not accept the agreement, your contributions cannot be accepted. -Before submitting your code, make sure to [read the contributor license agreement](#read-the-contributor-license-agreement) beforehand. If you don't accept the aggreement, please stop submitting. Your submission means you have accepted the agreement. Your submission should solve an issue or add a feature registered in the [github issue tracker](https://github.com/taosdata/TDengine/issues). If no corresponding issue or feature is found in the issue tracker, please create one. When submitting your code to our repository, please create a pull request with the issue number included. +- Please solve an issue or add a feature registered in the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. 
+- If no corresponding issue or feature is found in the issue tracker, please **create one**.
+- When submitting your code to our repository, please create a pull request with the **issue number** included.
+
+## Guidelines for communicating
+
+1. Please be **nice and polite** in the description.
+2. **Active voice is better than passive voice in general**. Sentences in the active voice highlight who is performing the action, whereas the passive voice highlights the recipient of the action.
+3. Documentation writing advice:
+
+- Spell the product name "TDengine" correctly. "TD" is written in capital letters, and there is no space between "TD" and "engine" (**Correct spelling: TDengine**).
+- Please **capitalize the first letter** of every sentence.
+- Leave **only one space** after periods or other punctuation marks.
+- Use **American spelling**.
+- When possible, **use second person** rather than first person (e.g. "You are recommended to use a reverse proxy such as Nginx." rather than "We recommend using a reverse proxy such as Nginx.").
+
+4. Use **simple sentences** rather than complex sentences.
+
+## Gifts for the contributors
+
+Developers, as long as you contribute to TDengine, whether it's code that fixes bugs or implements feature requests, or documentation changes, **you are eligible for a very special Contributor Souvenir Gift!**
+
+**You can choose one of the following gifts:**
+
+

+ + + + +The TDengine community is committed to making TDengine accepted and used by more developers. + +Just fill out the **Contributor Submission Form** to choose your desired gift. + +- [Contributor Submission Form](https://page.ma.scrmtech.com/form/index?pf_uid=27715_2095&id=12100) + +## Contact us + +If you have any problems or questions that need help from us, please feel free to add our WeChat account: TDengineECO. From 374968d0e2d56301ab4dedd5e4262a9b48809482 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 25 Aug 2022 11:07:03 +0800 Subject: [PATCH 54/79] other:merge 3.0 --- docs/zh/12-taos-sql/14-stream.md | 135 +++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 docs/zh/12-taos-sql/14-stream.md diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md new file mode 100644 index 0000000000..28f52be59a --- /dev/null +++ b/docs/zh/12-taos-sql/14-stream.md @@ -0,0 +1,135 @@ +--- +sidebar_label: 流式计算 +title: 流式计算 +--- + + +## 创建流式计算 + +```sql +CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery +stream_options: { + TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time] + WATERMARK time +} + +``` + +其中 subquery 是 select 普通查询语法的子集: + +```sql +subquery: SELECT select_list + from_clause + [WHERE condition] + [PARTITION BY tag_list] + [window_clause] +``` + +支持会话窗口、状态窗口与滑动窗口,其中,会话窗口与状态窗口搭配超级表时必须与partition by tbname一起使用 + +```sql +window_clause: { + SESSION(ts_col, tol_val) + | STATE_WINDOW(col) + | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] +} +``` + +其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。 + +窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished) + +例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。 + +```sql +CREATE STREAM avg_vol_s INTO avg_vol AS +SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); +``` + +## 流式计算的 partition + +可以使用 PARTITION BY TBNAME 或 PARTITION BY tag,对一个流进行多分区的计算,每个分区的时间线与时间窗口是独立的,会各自聚合,并写入到目的表中的不同子表。 + +不带 PARTITION BY 选项时,所有的数据将写入到一张子表。 + +流式计算创建的超级表有唯一的 tag 列 groupId,每个 partition 会被分配唯一 groupId。与 schemaless 写入一致,我们通过 MD5 计算子表名,并自动创建它。 + +## 删除流式计算 + +```sql +DROP STREAM [IF NOT EXISTS] stream_name; +``` + +仅删除流式计算任务,由流式计算写入的数据不会被删除。 + +## 展示流式计算 + +```sql +SHOW STREAMS; +``` + +若要展示更详细的信息,可以使用: + +```sql +SELECT * from performance_schema.`perf_streams`; +``` + +## 流式计算的触发模式 + +在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。 + +对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式: + +1. AT_ONCE:写入立即触发 + +2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用) + +3. 
MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 + +由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。 + +因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。 + +MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算 + +## 流式计算的窗口关闭 + +流式计算以事件时间(插入记录中的时间戳主键)为基准计算窗口关闭,而非以 TDengine 服务器的时间,以事件时间为基准,可以避免客户端与服务器时间不一致带来的问题,能够解决乱序数据写入等等问题。流式计算还提供了 watermark 来定义容忍的乱序程度。 + +在创建流时,可以在 stream_option 中指定 watermark,它定义了数据乱序的容忍上界。 + +流式计算通过 watermark 来度量对乱序数据的容忍程度,watermark 默认为 0。 + +T = 最新事件时间 - watermark + +每次写入的数据都会以上述公式更新窗口关闭时间,并将窗口结束时间 < T 的所有打开的窗口关闭,若触发模式为 WINDOW_CLOSE 或 MAX_DELAY,则推送窗口聚合结果。 + + +![TDengine 流式计算窗口关闭示意图](./watermark.webp) + + +图中,纵轴表示不同时刻,对于不同时刻,我们画出其对应的 TDengine 收到的数据,即为横轴。 + +横轴上的数据点表示已经收到的数据,其中蓝色的点表示事件时间(即数据中的时间戳主键)最后的数据,该数据点减去定义的 watermark 时间,得到乱序容忍的上界 T。 + +所有结束时间小于 T 的窗口都将被关闭(图中以灰色方框标记)。 + +T2 时刻,乱序数据(黄色的点)到达 TDengine,由于有 watermark 的存在,这些数据进入的窗口并未被关闭,因此可以被正确处理。 + +T3 时刻,最新事件到达,T 向后推移超过了第二个窗口关闭的时间,该窗口被关闭,乱序数据被正确处理。 + +在 window_close 或 max_delay 模式下,窗口关闭直接影响推送结果。在 at_once 模式下,窗口关闭只与内存占用有关。 + + +## 流式计算的过期数据处理策略 + +对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据. + +TDengine 对于过期数据提供两种处理方式,由 IGNORE EXPIRED 选项指定: + +1. 重新计算,即 IGNORE EXPIRED 0:默认配置,从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果 + +2. 直接丢弃, 即 IGNORE EXPIRED 1:忽略过期数据 + + +无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。 From 487a1f26aed2581ba7c82c43d9ab5916a1a456e9 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 25 Aug 2022 10:56:29 +0800 Subject: [PATCH 55/79] add test cases --- tests/system-test/2-query/interp.py | 67 ++++++++++++++++++----------- 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 0fe86e44eb..5550519e05 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -552,37 +552,54 @@ class TDTestCase: tdSql.checkData(1, 0, 15) tdLog.printNoPrefix("==========step9:test multi-interp cases") - tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(null)") + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)") + tdSql.checkRows(5) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, None) + tdSql.checkData(1, i, None) + tdSql.checkData(2, i, 15) + tdSql.checkData(3, i, None) + tdSql.checkData(4, i, None) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)") + tdSql.checkRows(5) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 1) + tdSql.checkData(1, i, 1) + tdSql.checkData(2, i, 15) + tdSql.checkData(3, i, 1) + tdSql.checkData(4, i, 1) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(prev)") + tdSql.checkRows(5) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 5) + tdSql.checkData(1, i, 5) + tdSql.checkData(2, i, 15) + tdSql.checkData(3, i, 15) + tdSql.checkData(4, i, 15) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)") tdSql.checkRows(3) tdSql.checkCols(4) - tdSql.checkData(0, 0, None) - tdSql.checkData(1, 0, 15) - 
tdSql.checkData(2, 0, None) - tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(value, 1)") - tdSql.checkRows(3) - tdSql.checkCols(4) - tdSql.checkData(0, 0, 1) - tdSql.checkData(1, 0, 15) - tdSql.checkData(2, 0, 1) + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 15) + tdSql.checkData(1, i, 15) + tdSql.checkData(2, i, 15) - tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(prev)") - tdSql.checkRows(3) - tdSql.checkCols(4) - tdSql.checkData(0, 0, 5) - tdSql.checkData(1, 0, 15) - tdSql.checkData(2, 0, 15) - - tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(next)") - tdSql.checkRows(2) - tdSql.checkCols(4) - tdSql.checkData(0, 0, 15) - tdSql.checkData(1, 0, 15) - - tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-12 00:00:05') every(1d) fill(linear)") + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(linear)") tdSql.checkRows(1) tdSql.checkCols(4) - tdSql.checkData(0, 0, 15) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 15) tdLog.printNoPrefix("==========step10:test error cases") From 7f3146203b52107ff263ee40a5247e2f0b0bdbfd Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 25 Aug 2022 11:08:14 +0800 Subject: [PATCH 56/79] Merge branch '3.0' into fix/TD-18617 --- tests/script/tsim/parser/alter1.sim | 2 +- tests/script/tsim/parser/binary_escapeCharacter.sim | 2 +- tests/script/tsim/parser/col_arithmetic_operation.sim | 2 +- tests/script/tsim/parser/columnValue_unsign.sim | 2 +- tests/script/tsim/parser/fill_stb.sim | 2 +- tests/script/tsim/parser/import_file.sim | 2 +- tests/script/tsim/parser/repeatAlter.sim | 2 +- tests/script/tsim/parser/select_from_cache_disk.sim | 2 +- tests/script/tsim/parser/single_row_in_tb.sim | 2 +- tests/script/tsim/parser/single_row_in_tb_query.sim | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim index 9d0049e45e..369419dcd9 100644 --- a/tests/script/tsim/parser/alter1.sim +++ b/tests/script/tsim/parser/alter1.sim @@ -130,4 +130,4 @@ endi # return -1 #endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/binary_escapeCharacter.sim b/tests/script/tsim/parser/binary_escapeCharacter.sim index 0b437d8b04..5a9c0e7bb1 100644 --- a/tests/script/tsim/parser/binary_escapeCharacter.sim +++ b/tests/script/tsim/parser/binary_escapeCharacter.sim @@ -101,4 +101,4 @@ sql_error insert into tb values(now, '\'); #sql_error insert into tb values(now, '\\\n'); sql insert into tb values(now, '\n'); -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/col_arithmetic_operation.sim b/tests/script/tsim/parser/col_arithmetic_operation.sim index f22beefdf8..9a2ba34c85 100644 --- a/tests/script/tsim/parser/col_arithmetic_operation.sim +++ b/tests/script/tsim/parser/col_arithmetic_operation.sim @@ -132,4 +132,4 @@ sql_error select max(c1-c2) from $tb print 
=====================> td-1764
 sql select sum(c1)/count(*), sum(c1) as b, count(*) as b from $stb interval(1y)
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim
index 85ff490bf4..7ae1b20eca 100644
--- a/tests/script/tsim/parser/columnValue_unsign.sim
+++ b/tests/script/tsim/parser/columnValue_unsign.sim
@@ -129,4 +129,4 @@ if $rows != 1 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim
index 656b1ac94e..6c61631aa8 100644
--- a/tests/script/tsim/parser/fill_stb.sim
+++ b/tests/script/tsim/parser/fill_stb.sim
@@ -279,7 +279,7 @@ endi
 #endi
 
 ## linear fill
-sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
 $val = $rowNum * 2
 $val = $val - 1
 $val = $val * $tbNum
diff --git a/tests/script/tsim/parser/import_file.sim b/tests/script/tsim/parser/import_file.sim
index e031e0249d..37dc0c4476 100644
--- a/tests/script/tsim/parser/import_file.sim
+++ b/tests/script/tsim/parser/import_file.sim
@@ -69,4 +69,4 @@ endi
 
 system rm -f $inFileName
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/repeatAlter.sim b/tests/script/tsim/parser/repeatAlter.sim
index d28a03e193..b4012048cc 100644
--- a/tests/script/tsim/parser/repeatAlter.sim
+++ b/tests/script/tsim/parser/repeatAlter.sim
@@ -6,4 +6,4 @@ while $i <= $loops
 $i = $i + 1
 endw
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/select_from_cache_disk.sim b/tests/script/tsim/parser/select_from_cache_disk.sim
index 0983e36a3a..3c0b13c638 100644
--- a/tests/script/tsim/parser/select_from_cache_disk.sim
+++ b/tests/script/tsim/parser/select_from_cache_disk.sim
@@ -60,4 +60,4 @@ if $data12 != 1 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/single_row_in_tb.sim b/tests/script/tsim/parser/single_row_in_tb.sim
index 1bd53ad24e..e7b4c9a871 100644
--- a/tests/script/tsim/parser/single_row_in_tb.sim
+++ b/tests/script/tsim/parser/single_row_in_tb.sim
@@ -33,4 +33,4 @@ print ================== server restart completed
 
 run tsim/parser/single_row_in_tb_query.sim
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/single_row_in_tb_query.sim b/tests/script/tsim/parser/single_row_in_tb_query.sim
index 422756b798..37e193f9d2 100644
--- a/tests/script/tsim/parser/single_row_in_tb_query.sim
+++ b/tests/script/tsim/parser/single_row_in_tb_query.sim
@@ -195,4 +195,4 @@ endi
 
 print ===============>safty check TD-4927
 sql select first(ts, c1) from sr_stb where ts<1 group by t1;
-sql select first(ts, c1) from sr_stb where ts>0 and ts<1;
\ No newline at end of file
+sql select first(ts, c1) from sr_stb where ts>0 and ts<1;

From 167673e5091d510898f47ec9d94ead98b3321515 Mon Sep 17 00:00:00 2001
From: Ganlin Zhao
Date: Thu, 25 Aug 2022 11:08:14 +0800
Subject: [PATCH 57/79] Merge branch '3.0' into fix/TD-18617

---
 tests/script/tsim/query/complex_group.sim | 2 +-
 tests/script/tsim/query/complex_having.sim | 2 +-
 tests/script/tsim/query/complex_limit.sim | 2 +-
 tests/script/tsim/query/complex_select.sim | 2 +-
 tests/script/tsim/query/complex_where.sim | 2 +-
 tests/script/tsim/query/crash_sql.sim | 2 +-
 tests/script/tsim/query/diff.sim | 18 +++++++++---------
 tests/script/tsim/query/explain.sim | 16 ++++++++--------
 tests/script/tsim/query/interval.sim | 2 +-
 tests/script/tsim/query/scalarFunction.sim | 6 +++---
 tests/script/tsim/query/scalarNull.sim | 2 +-
 tests/script/tsim/query/session.sim | 12 ++++++------
 tests/script/tsim/query/stddev.sim | 2 +-
 tests/script/tsim/query/time_process.sim | 2 +-
 tests/script/tsim/query/udf.sim | 2 +-
 15 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/tests/script/tsim/query/complex_group.sim b/tests/script/tsim/query/complex_group.sim
index 3dad8059cd..d7d14c0ee8 100644
--- a/tests/script/tsim/query/complex_group.sim
+++ b/tests/script/tsim/query/complex_group.sim
@@ -454,4 +454,4 @@ if $rows != 1 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_having.sim b/tests/script/tsim/query/complex_having.sim
index 9e28c3803e..4c0af6d10c 100644
--- a/tests/script/tsim/query/complex_having.sim
+++ b/tests/script/tsim/query/complex_having.sim
@@ -365,4 +365,4 @@ if $rows != 1 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_limit.sim b/tests/script/tsim/query/complex_limit.sim
index 2a90e7ff1d..acb133f650 100644
--- a/tests/script/tsim/query/complex_limit.sim
+++ b/tests/script/tsim/query/complex_limit.sim
@@ -508,4 +508,4 @@ if $rows != 1 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_select.sim b/tests/script/tsim/query/complex_select.sim
index f4c9877bfd..b7697e5cab 100644
--- a/tests/script/tsim/query/complex_select.sim
+++ b/tests/script/tsim/query/complex_select.sim
@@ -558,4 +558,4 @@ if $data00 != 33 then
 endi
 
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_where.sim b/tests/script/tsim/query/complex_where.sim
index bda1c036f0..847f67ed34 100644
--- a/tests/script/tsim/query/complex_where.sim
+++ b/tests/script/tsim/query/complex_where.sim
@@ -669,4 +669,4 @@ if $rows != 1 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/crash_sql.sim b/tests/script/tsim/query/crash_sql.sim
index 1d20491869..169f2e7272 100644
--- a/tests/script/tsim/query/crash_sql.sim
+++ b/tests/script/tsim/query/crash_sql.sim
@@ -79,4 +79,4 @@ print ================ start query ======================
 print ================ SQL used to cause taosd or taos shell crash
 sql_error select sum(c1) ,count(c1) from ct4 group by c1 having sum(c10) between 0 and 1 ;
 
-#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/diff.sim b/tests/script/tsim/query/diff.sim
index f0d82b01e9..badd139a9f 100644
--- a/tests/script/tsim/query/diff.sim
+++ b/tests/script/tsim/query/diff.sim
@@ -25,17 +25,17 @@ $i = 0
 while $i < $tbNum
 $tb = $tbPrefix . $i
 sql create table $tb using $mt tags( $i )
-
+
 $x = 0
 while $x < $rowNum
 $cc = $x * 60000
 $ms = 1601481600000 + $cc
- sql insert into $tb values ($ms , $x )
+ sql insert into $tb values ($ms , $x )
 $x = $x + 1
- endw
-
+ endw
+
 $i = $i + 1
-endw
+endw
 
 sleep 100
 
@@ -61,7 +61,7 @@ sql select _rowts, diff(tbcol) from $tb where ts > $ms
 print ===> rows: $rows
 print ===> $data00 $data01 $data02 $data03 $data04 $data05
 print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
 return -1
 endi
 
@@ -72,7 +72,7 @@ sql select _rowts, diff(tbcol) from $tb where ts <= $ms
 print ===> rows: $rows
 print ===> $data00 $data01 $data02 $data03 $data04 $data05
 print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
 return -1
 endi
 
@@ -82,7 +82,7 @@ sql select _rowts, diff(tbcol) as b from $tb
 print ===> rows: $rows
 print ===> $data00 $data01 $data02 $data03 $data04 $data05
 print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
 return -1
 endi
 
@@ -107,4 +107,4 @@ if $rows != 2 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim
index 30a857815c..2871252d91 100644
--- a/tests/script/tsim/query/explain.sim
+++ b/tests/script/tsim/query/explain.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
 system sh/exec.sh -n dnode1 -s start
 sql connect
 
-print ======== step1
+print ======== step1
 sql create database db1 vgroups 3;
 sql use db1;
 sql select * from information_schema.ins_databases;
@@ -30,7 +30,7 @@ sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..2
 
 #sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)");
 
-print ======== step2
+print ======== step2
 sql explain select * from st1 where -2;
 sql explain select ts from tb1;
 sql explain select * from st1;
@@ -41,14 +41,14 @@ sql explain select count(*),sum(f1) from st1;
 sql explain select count(*),sum(f1) from st1 group by f1;
 #sql explain select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev);
 
-print ======== step3
+print ======== step3
 sql explain verbose true select * from st1 where -2;
 sql explain verbose true select ts from tb1 where f1 > 0;
 sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00';
 sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1;
 sql explain verbose true select * from information_schema.ins_stables where db_name='db2';
 
-print ======== step4
+print ======== step4
 sql explain analyze select ts from st1 where -2;
 sql explain analyze select ts from tb1;
 sql explain analyze select ts from st1;
@@ -59,7 +59,7 @@ sql explain analyze select count(*),sum(f1) from tb1;
 sql explain analyze select count(*),sum(f1) from st1;
 sql explain analyze select count(*),sum(f1) from st1 group by f1;
 
-print ======== step5
+print ======== step5
 sql explain analyze verbose true select ts from st1 where -2;
 sql explain analyze verbose true select ts from tb1;
 sql explain analyze verbose true select ts from st1;
@@ -87,12 +87,12 @@ sql explain analyze verbose true select count(f1) from st1 group by tbname;
 #sql explain select * from tb1, tb2 where tb1.ts=tb2.ts;
 #sql explain select * from st1, st2 where tb1.ts=tb2.ts;
 #sql explain analyze verbose true select sum(a+b) from (select _rowts, min(f1) b,count(*) a from st1 where f1 > 0 interval(1a)) where a < 0 interval(1s);
-#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s);
+#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s);
 #sql explain verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0;
-#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m);
+#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m);
 #sql explain analyze select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev);
 #sql explain analyze verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0;
-#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
+#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/interval.sim b/tests/script/tsim/query/interval.sim
index cc8a73daec..833da4a8ba 100644
--- a/tests/script/tsim/query/interval.sim
+++ b/tests/script/tsim/query/interval.sim
@@ -177,4 +177,4 @@ print =============== clear
 # return -1
 #endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/scalarFunction.sim b/tests/script/tsim/query/scalarFunction.sim
index 103e66e54e..1b8115fec6 100644
--- a/tests/script/tsim/query/scalarFunction.sim
+++ b/tests/script/tsim/query/scalarFunction.sim
@@ -33,7 +33,7 @@ print =============== create normal table
 sql create table ntb (ts timestamp, c1 int, c2 float, c3 double)
 sql show tables
 
-if $rows != 101 then
+if $rows != 101 then
 return -1
 endi
 
@@ -444,7 +444,7 @@ if $loop_test == 0 then
 print =============== stop and restart taosd
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
 system sh/exec.sh -n dnode1 -s start
-
+
 $loop_cnt = 0
 check_dnode_ready_0:
 $loop_cnt = $loop_cnt + 1
@@ -462,7 +462,7 @@ if $loop_test == 0 then
 goto check_dnode_ready_0
 endi
 
- $loop_test = 1
+ $loop_test = 1
 goto loop_test_pos
 endi
 
diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim
index ec95c94f23..6abe3d62d9 100644
--- a/tests/script/tsim/query/scalarNull.sim
+++ b/tests/script/tsim/query/scalarNull.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
 system sh/exec.sh -n dnode1 -s start
 sql connect
 
-print ======== step1
+print ======== step1
 sql create database db1 vgroups 3;
 sql use db1;
 sql select * from information_schema.ins_databases;
diff --git a/tests/script/tsim/query/session.sim b/tests/script/tsim/query/session.sim
index 158448d765..b6eb4ed3aa 100644
--- a/tests/script/tsim/query/session.sim
+++ b/tests/script/tsim/query/session.sim
@@ -35,8 +35,8 @@ sql INSERT INTO dev_001 VALUES('2020-05-13 13:00:00.001', 12)
 sql INSERT INTO dev_001 VALUES('2020-05-14 13:00:00.001', 13)
 sql INSERT INTO dev_001 VALUES('2020-05-15 14:00:00.000', 14)
 sql INSERT INTO dev_001 VALUES('2020-05-20 10:00:00.000', 15)
-sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16)
-
+sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16)
+
 sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.000', 1)
 sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.005', 2)
 sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.009', 3)
@@ -46,7 +46,7 @@ sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.036', 6)
 sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.51', 7)
 
 # vnode does not return the precision of the table
-print ====> create database d1 precision 'us'
+print ====> create database d1 precision 'us'
 sql create database d1 precision 'us'
 sql use d1
 sql create table dev_001 (ts timestamp ,i timestamp ,j int)
@@ -54,7 +54,7 @@ sql insert into dev_001 values(1623046993681000,now,1)(1623046993681001,now+1s,2
 sql create table secondts(ts timestamp,t2 timestamp,i int)
 sql insert into secondts values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
 
-$loop_test = 0
+$loop_test = 0
 loop_test_pos:
 
 sql use $dbNamme
@@ -299,7 +299,7 @@ if $loop_test == 0 then
 print =============== stop and restart taosd
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
 system sh/exec.sh -n dnode1 -s start
-
+
 $loop_cnt = 0
 check_dnode_ready_0:
 $loop_cnt = $loop_cnt + 1
@@ -317,7 +317,7 @@ if $loop_test == 0 then
 goto check_dnode_ready_0
 endi
 
- $loop_test = 1
+ $loop_test = 1
 goto loop_test_pos
 endi
 
diff --git a/tests/script/tsim/query/stddev.sim b/tests/script/tsim/query/stddev.sim
index d61c7273e1..b45c7d80a3 100644
--- a/tests/script/tsim/query/stddev.sim
+++ b/tests/script/tsim/query/stddev.sim
@@ -409,4 +409,4 @@ if $rows != 2 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/time_process.sim b/tests/script/tsim/query/time_process.sim
index b3c0e9561f..83a6445846 100644
--- a/tests/script/tsim/query/time_process.sim
+++ b/tests/script/tsim/query/time_process.sim
@@ -111,4 +111,4 @@ if $rows != 2 then
 return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim
index 7cc1403bcb..7f8b1044ef 100644
--- a/tests/script/tsim/query/udf.sim
+++ b/tests/script/tsim/query/udf.sim
@@ -9,7 +9,7 @@ system sh/cfg.sh -n dnode1 -c udf -v 1
 system sh/exec.sh -n dnode1 -s start
 sql connect
 
-print ======== step1 udf
+print ======== step1 udf
 system sh/compile_udf.sh
 sql create database udf vgroups 3;
 sql use udf;

From bebd9225cc9026643ddb9ee9fefc4652cea87cb4 Mon Sep 17 00:00:00 2001
From: dapan1121
Date: Thu, 25 Aug 2022 11:49:01 +0800
Subject: [PATCH 58/79] fix: fix explain buffer issue

---
 source/common/src/tmsg.c | 9 ++++++++-
 source/libs/nodes/src/nodesToSQLFuncs.c | 12 +++++++++++-
 source/util/test/hashTest.cpp | 2 --
 3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index b3c0363e44..1d96131f11 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -5123,8 +5123,15 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
 }
 
 void tFreeSVCreateTbRsp(void* param) {
+ if (NULL == param) {
+ return;
+ }
+
 SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param;
- taosMemoryFree(pRsp->pMeta);
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
 }
 
 // TDMT_VND_DROP_TABLE =================
diff --git a/source/libs/nodes/src/nodesToSQLFuncs.c b/source/libs/nodes/src/nodesToSQLFuncs.c
index e521c57c3d..9325d02886 100644
--- a/source/libs/nodes/src/nodesToSQLFuncs.c
+++ b/source/libs/nodes/src/nodesToSQLFuncs.c
@@ -135,7 +135,12 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
 NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
 }
 
- *len += snprintf(buf + *len, bufSize - *len, "%s", t);
+ int32_t tlen = strlen(t);
+ if (tlen > 32) {
+ *len += snprintf(buf + *len, bufSize - *len, "%.*s...%s", 32, t, t + tlen - 1);
+ } else {
+ *len += snprintf(buf + *len, bufSize - *len, "%s", t);
+ }
 taosMemoryFree(t);
 
 return TSDB_CODE_SUCCESS;
@@ -199,12 +204,17 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
 SNodeListNode *pListNode = (SNodeListNode *)pNode;
 SNode *node = NULL;
 bool first = true;
+ int32_t num = 0;
 
 *len += snprintf(buf + *len, bufSize - *len, "(");
 FOREACH(node, pListNode->pNodeList) {
 if (!first) {
 *len += snprintf(buf + *len, bufSize - *len, ", ");
+ if (++num >= 10) {
+ *len += snprintf(buf + *len, bufSize - *len, "...");
+ break;
+ }
 }
 NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
 first = false;
diff --git a/source/util/test/hashTest.cpp b/source/util/test/hashTest.cpp
index 135db8192a..97e67ea36e 100644
--- a/source/util/test/hashTest.cpp
+++ b/source/util/test/hashTest.cpp
@@ -313,14 +313,12 @@ void perfTest() {
 SArray *slArray = taosArrayInit(100000000, 9);
 for (int64_t i = 0; i < 1000; ++i) {
 int32_t num = taosArrayGetSize(sArray[i]);
- printf("%d ", num);
 SArray* pArray = sArray[i];
 for (int64_t m = 0; m < num; ++m) {
 char* p = (char*)taosArrayGet(pArray, m);
 ASSERT(taosArrayPush(slArray, p));
 }
 }
- printf("\n");
 int64_t start100mS = taosGetTimestampMs();
 int64_t start100mSCt = taosHashGetCompTimes(hash100m);
 int32_t num = taosArrayGetSize(slArray);

From d29c4e642b6677648ac35efb3ee83bee0e180b66 Mon Sep 17 00:00:00 2001
From: Jeff Tao
Date: Thu, 25 Aug 2022 11:56:22 +0800
Subject: [PATCH 59/79] Update index.md

---
 docs/zh/04-concept/index.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md
index 8e97d4a2f4..c6d83ce4c3 100644
--- a/docs/zh/04-concept/index.md
+++ b/docs/zh/04-concept/index.md
@@ -104,15 +104,15 @@ title: 数据模型和基本概念
 
 ## 采集量 (Metric)
 
-采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。
+采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
 
 ## 标签 (Label/Tag)
 
-标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。
+标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的location与groupId就是标签。
 
 ## 数据采集点 (Data Collection Point)
 
-数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。
+数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的d1001, d1002, d1003, d1004等就是数据采集点。
 
 ## 表 (Table)
 
@@ -137,7 +137,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表
 超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
 
-在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。
+在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表meters.
 
 ## 子表 (Subtable)
 
@@ -156,7 +156,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表
 查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
 
-TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。
+TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。
 
 ## 库 (database)
 
From 8d8bb68f92a3d4e67f072199d97041a63f6aa310 Mon Sep 17 00:00:00 2001
From: jiacy-jcy
Date: Thu, 25 Aug 2022 12:35:44 +0800
Subject: [PATCH 60/79] add description of apt-get remove tdengine

---
 docs/zh/17-operation/01-pkg-install.md | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md
index 5e4cc93130..838ec538b0 100644
--- a/docs/zh/17-operation/01-pkg-install.md
+++ b/docs/zh/17-operation/01-pkg-install.md
@@ -47,7 +47,26 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
 
-内容 TBD
+卸载命令如下:
+
+```
+$ sudo apt-get remove tdengine
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages were automatically installed and are no longer required:
+ libevent-core-2.1-7 libevent-pthreads-2.1-7 libopts25 sntp
+Use 'apt autoremove' to remove them.
+The following packages will be REMOVED:
+ tdengine
+0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n] y
+(Reading database ... 135625 files and directories currently installed.)
+Removing tdengine (3.0.0.0) ...
+TDengine is removed successfully!
+
+```
@@ -57,7 +76,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
 
 ```
 $ sudo dpkg -r tdengine
 (Reading database ... 120119 files and directories currently installed.)
-Removing tdengine (3.0.0.10002) ...
+Removing tdengine (3.0.0.0) ...
 TDengine is removed successfully!
 ```
 

From f1304dafc5a2dccdb277a7bc09f9e0c2f016113b Mon Sep 17 00:00:00 2001
From: jiacy-jcy
Date: Thu, 25 Aug 2022 12:41:22 +0800
Subject: [PATCH 61/79] docs:add aptremove of tdengine for en document

---
 docs/en/13-operation/01-pkg-install.md | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
index a8d8d7b474..caaa920945 100644
--- a/docs/en/13-operation/01-pkg-install.md
+++ b/docs/en/13-operation/01-pkg-install.md
@@ -15,6 +15,30 @@ About details of installing TDenine, please refer to [Installation Guide](../../
 
 ## Uninstall
 
+
+
+Apt-get package of TDengine can be uninstalled as below:
+
+```bash
+$ sudo apt-get remove tdengine
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages were automatically installed and are no longer required:
+ libevent-core-2.1-7 libevent-pthreads-2.1-7 libopts25 sntp
+Use 'apt autoremove' to remove them.
+The following packages will be REMOVED:
+ tdengine
+0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n] y
+(Reading database ... 135625 files and directories currently installed.)
+Removing tdengine (3.0.0.0) ...
+TDengine is removed successfully!
+
+```
+
+
 
 Deb package of TDengine can be uninstalled as below:
 
From 49148ae7c42e87401ce8bc851955d340e2f38eb2 Mon Sep 17 00:00:00 2001
From: gccgdb1234
Date: Thu, 25 Aug 2022 12:45:36 +0800
Subject: [PATCH 62/79] doc: add data model diagram in concpet chapter

---
 docs/en/04-concept/index.md | 4 +++-
 docs/en/04-concept/supertable.webp | Bin 0 -> 33420 bytes
 docs/en/07-develop/07-tmq.mdx | 2 +-
 docs/zh/04-concept/index.md | 3 +++
 docs/zh/04-concept/supertable.webp | Bin 0 -> 33420 bytes
 5 files changed, 7 insertions(+), 2 deletions(-)
 create mode 100644 docs/en/04-concept/supertable.webp
 create mode 100644 docs/zh/04-concept/supertable.webp

diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 44dcad82fc..1f76a66d1a 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -156,7 +156,9 @@ The relationship between a STable and the subtables created based on this STable
 
 Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
 
-In TDengine, it is recommended to use a subtable instead of a regular table for a DCP.
+In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters.
+ +To better understand the data model using super table and subtable, please refer to [Meters Data Model Diagram](supertable.webp) ## Database diff --git a/docs/en/04-concept/supertable.webp b/docs/en/04-concept/supertable.webp new file mode 100644 index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37 GIT binary patch literal 33420 zcmb@tV{~R+*De^_NyWBp+eXEzIH}mSZCjP3V%xT@ifya!dfw;lug^I>M*ry_dyHg{ zdre#uYtFUjQkD`I4+;PRQWq0e{I19;>jwCnL4CXt_=|#6^7_l|3{1A?oubK;M97?U zG#*X!U30pP1Bmzs$o+Wq7zh8#1&96`#N^NV6kPrlvHH=a*~Js^-rBlj3bnHPe8ese zBXeONV>1$MGB1f@>11hzd$-qBUZldIT;Vh)(#%N zGc`O(_XoYUPWMaXdzx8QB_(?OR>^P0h04xu_c<72{q1GA%hbAOwR=mO3OHxlU2dm} zT&kBzMxNIKe=u$Yt=6$yXPZXlafW|ZJGB@!_v44ytr?n%^JE#qi3k{)dQiggO&A{j zmQ72y=$X*4N>**2x@%KQ(4jl&x(xgD^fQ)8AeBl8yPIkN`-F^yHUE`;`JR+}wbn!{ zl|V}Ki5vF}PsnQ^iHQ<3K%O_@TEblI9Tq;dYrx$wN&I>RoIna#KU(nVYK9(G7UuRv zQA*+`m(FMKRk|kqgj0jkPZ3cNpRg|v#H6NZQ@J1?iCZr8Y)DPG91EB%jCiC z)99=AYsL3m;E~`h_G9?}&wuyc{||C1Cxb?*<@~B!IH~29hQU*L=NxNUUtox)>)Zkt z^cT^){xS5w8Ox?1yBavSK^~@jZT`Q6y8Ex#Qed%8a7&B12Zv6Uho^#Sw)=-pP-OO( z?XeqJYE8ZK`Z<$XyzWHYRn;D3&K^?vC2>>jEF}7{J2#e%G#KSjyunvfLA0@VTE#RL zPtdvLi#((_}|>$}K@Q^2^OpQk_}E*7yZl z2<(Xx+TQ%bl8T+d6s8yC+q50P-fVhxR!P0~Kh%@sYo7<_g$iDDZVQH_gbLih-{yZw z3e;O~JELIDe*NTe2%K2V=AmzqIc?R|{*oe1bo#OWnMo>Xa}CPm>8jWJ(Rd~p;vahw zckW8X@x$ecHyeE+`iNAQ>UWiPt@53thTQ#b`8UfL^_B01u11t%HbIzs?U$Ixo>H|61-TqZD<{ zszGp$qAq*&RvwDDx9WX|7>@6Q9wo0UYP!ivHo3L2r7*hE1*^6xIKE5$Xx}@r8={@_ z)>b%|5YK7JYgFAJ`}C8+b8^BKueHPcq8vk!f6HvLcBo({I}PLG(YF{v&(!8lxm27V=9`~#$MdwdODoDjS4iD1p@d3RUlSYg#3N4N(b#m)+xPv?;&#+ZwqXjESWZe(!WI;d!S|Yb%bnPGZyMm(x zvDxxzD-agD{yE9lGhCg9DfVnbjsjp*W}Hm?XQ3~N(B{sYTM5Tq_>a|*Zt)Ji*w7Ht z%FC)&rjk0dikc_6@OQOotF-eLDHQHM*%;>BQK*gTv-cH{3%P7*?YoFG&vqLpJsi0DngjDY0)-4dAPgwsF)&tx8 z5ir=96JTpe{55mjU=`(p#1FZsHmWm9fzXFLs!nNQ`x1YXfTcPvA#-ZZXdlS)6o(r- zVov^o9IqtG7-|9lp+^)TBDoAT9xVFPYxmrdsz}Hw&n3DeQ;h#5WrAVC;@T|ewpjY* zbNtIrE%4$kNrEu8?1ll*Y{{@QstI_ub5;7kM4I}l*m4KKBRzGav9?(iKh0Bi`7QFa zowH&odg!oZh{DM>Z(ahP1LwP^@D`s&hg(ySEqZ#~;2~`6GWof+q^mOiA;iVe2K|ia z{RA25+1a+`J~j@7bH>juT^X`$rtGm&t%J7-RND)sE$N^d=#R5ek{iPggjxudCQ0hp z$jrRI&CvTt57I-HMbKLyPJ%IXWEMDb26I=T2@UNW#l85zj1{wmh>(#f!Y^FZ!)Aat z+T8PAE3PG1KUh0Yh=Y02J3^WWV|Q5L5JryeWKagMc?0Jo#V=!n*c|!@0T_aS{KJfO zG+LnD4b3;T8=3U-h|ZYom*{8VZJ~D2$eLNvvIGsZVT9M`9&O88Wa&plAV*ZUlok|= zgd5wx%z?;fH#a!B8d`2+h5oc4tQ;Cqwgg6K<3ggI*m-r(YNB&`!H3*#k*Z-jVozKy z^!9Eb#TkgV0+wBDNM-?<`8Y6h;)dgok`p94iSTU`GeR0=6*U^8VG?rNx#`_DikQVT zeQrpD9)|dVv5bXr0m&A;wcc<%$I=A$*UCx8ylBv$_+P{L^Vpu2v_AzDmeb{;Gpd4Q zH^TqI37o;yr~-BtA^c&pplS?L)$(z9Gz6JICA7x*G4{@S4|z#Rl~X0ZPBIN2`~(i% zC7|Rz+P*T89^>!wxC$%+9R6BE^uffp!7Sxn%OE&J4S#4;GYa{|x$}T?C{CAn51NOv zX@=0UXB@{)4pnv2`D%&mo)*_(R+^q0K{M{j(?%0v4S^tJa zZZ=$jLVya&&dB_w+trVX%bo2py?s}is&|8SQXdn$m>;_wig{a*IC(!+e9&Akb9IQ& z8V-`d;;dhQo_p(Vd^hrf+8&zn6iBqgW3pB|1BO_~QoPO4+rphF{Rn942w0^AA7On) zh2tRxW%@Z@&miYlJK5VAG>Xn`h3e5^1T~0D;gEeVJgWbS^Jm}2ulb%|pIGcZCzfrZ z)a_sxINsn3=MvIVM72U|X+aZt49&!ZKUqHedeX*Ikz_--^be3I^Rm`<-ia2Fm0QQH zVmTnIABzJCUJx>_C9ho9-N29!JJf{p{p!lF-vFtZ@cFjwJ-AirB-yZOVPY@`txl9$ zyGPXU>g;BUg|WMxOK5EcgSf#0m;FqYmG3zW!kfBzSSjpEwne{{pmv%$1ICi7ptk1T zMj|jAhxPX@#-aGC=E}I8ERjr+gGOnUhwMy~eZ6mGRsLsnjI^+8P@JoB&Jv5KBvfDL z>@Tv&2=kG-r$e^H92%G%`;BQYN3}VRw)wT>+sv_1=Y>qTJPO%Kt zTZ(MP%eq{F==_G{MvKAwd9XGE`0Sf=^z1$9DD)S%*XF;q=^n5q?F&EUL$&Kyyuc?f ze#h&R*`W1*>(|`%|GRi?{(ly-xiFTGzvu51Eo>Vg*DgEeVj12Lw;u(z%G5N^vNP`f zUB(F@^Y>Z#Ps#c6w-)|y0n7Q-dI6}9j|3ib^OlS^CPp;#=2Pwu^XF5NhA~lz9Hyri zid3+)toLKY^sx&Y3~W-$q)M4-aZumxCXlNImI}X?XIHdso(_%-uv1d_dZy}s%6yH) zB0PQ`FDqNF_Pnh5Lq90dsU*sqXrLdyw}Z!r(K~AF`U%dZ`~-2zIna8=G(4RtoVQ%9 
zd0CTaWwbXf$dw$YNuz;;06w1vgCJH6DUmXFDbl`aIz-XHF6+7}VPZE%9fZw3fNGzO;}8Vx_cVujK+m+Cc++W%68_aJfV z9^(a`cTk5y^{Vdny7`e7mX@T6JZ=U8icBdoPs+-pTh8I7wR?ro?pGCuk>_l_Bgm!ptN0VjsezK3-tB#W3k%kg1efo zH6p$CC$OYIX#!{CZNCkhrRyaJ%}{vu$0yPIW+=}~^wb0O*0RyS>HMD7W6ZcQ|=O#ePQ zq*J<$qd(Jo`mNQMdj#|!!&b_QD3j@A*LBaxyuPo;4k0HD43jK{j5iduIzAj$+5xdE zYOrak`V@L$L2<=L+zk0W58&?a@0-K6ZVQW!X)t15ynPVx8kwb+lT|~70ajwr_&^}Q zz>borrb7ku(qkhV!g9{Fu^n5H5{UH%^sX9G$DY|O(ZqKc!#37==9Kn2A6~fST1=sH z=<^IC z@{YP+wZIA%=2_Oh-Ff!8F1E^H^8y;Nc+{)98}lrUqv!zp3CJ4p_=f}ws#0_y{dVq| zNhUrtITIfi8d3Ti%|vdvKeY;YY1lsJ=(SftL{%1T!%S3^I;Q6iGlkg8i^FWRh5QsA z;>Hso=Bw-IL96L;Yp@*zKo-dz1F%Z3M=@h_>t`MQI}g!E`t58%-9sv>N8nzMGZ4|* znPrcr+XaO?;ta6#XTeP96@!WVQzWysRUv|jM`{O~(7tU+Go^JSilt)TQcR-;__Gj| zpM`!!E#GO2b<$aaKgybTLq$Uij{Fya&e}ftB~JhTaGSUo^5YjAb2K$6m;U#*8kcuh zI@uLumVM;24jV<#=igsl!Zh>B@^nCLE+@NwAO}oJ0vX?k)j6r5PQrL-=U1@SIEISy z?9yvH3~yDX$4*&Ec!eLM2J*mX8mMBSB+%H})n7(LyO6#W6*n)r?;$O5gdD8PIeV9j zt4V&k4ZveJv#VT8Hi^;nK2@m)(2Z;cbf53;=@0*@Ojsm1U9H1ZoDp-k?PnqT_%)|) zI7;)Yym=vIhS>>SS@uOeAN8%p>Uo(!KpXzn5J>ni5o+6%PzMV=2EK>Mje84zZgzmz zw*XSH$ineBBzA+*mw5px23G4r&3=H}HW|_m8Vqw9Nz(Bj{kmKj46#M>;?VpX#-3M_WOz_#BwZDzn^^fQ%hy2-xQ~oepaU!LbhYZjZC8u@_ZarY z6qhZh{`X_w2$7Eu4&KwE&LIholP_Y6eeSEnUJvTWKbN|+OmqheNzZGmu$e@$SMu_! zTZ+_arr*PKr1TrhZRwsty1>g`NQdE@f}}4uNzUw+LjyuS^~k}4$D(yXRB3w2q?Rvg^8-?zwK0#T*G@ zC`%Tve!+wj#7YN>S5E49)>|v-+wqhVIL52KD76O8j}O~On#pA;a;m8^O7tA5U1qRk zu1dtON^5x`U>0^(${DT{Uk^B3KpuPp>!>WAa*?iHZwa;U>L(f9O+O{s(6UaCN_43zYP$we{DVQ+0NJ- zL<pHqAkva=Zr7@MKih1GVxG;d@A?G`9L4c6_x^+`Qlp>RbA| zIvp5MoInd-{Yax+cx||FDQ~AS-O;v*&Jy}^{~ervW}2sz?Pipce1KLGCIR+;1sR%IIFh2$~-G?MVDr zU$|$6=Wg<}qs)xh+XOAuE)BYlCpQtyraM_C>Vy5HU*C#%y~PF?>gr6GrG77-DWN?9c2|ME`mX%?TeVQ(CRNRX28x6UQqEEU|gjxm@e*o}) zkuRLEt27s1(D6rgoC#zh;hIF)VDs*yqFVZ>g>Yw#in_7oagR5@*eKH=uO?%} zsd_hsHp<4AA;IXP5#toHbio}5uVxp-8IPnLV|l8%Y~}0JOeTag6R7j z_&hK+2((atFBkN}fheBgZ;Ut053-n|1<6Z>`b7 ziNp{i{502-#wm4ZP^>oux97=E2FI=au-dQZD|x%Ub?j0LbN3E9p+ofv$*L3Vf&#Rtl(AwGmCb@ z={QXEf^P+t3~>L&DHhcq} zT0&C1k0uYF0@WjPW|!$oX>X2!1~#$GiU7lX4{PdTR1I#3m8cb}<#n=UXG5plOCL)9 zmUWgE%Mi!;TRkwnw8%2Ss4`>>IFJtHhghCcTbD~;=*L^m{GVA-USBEGE`k@G&b3Wnu9$68F0Z-j>sLYUX}F?MfyrscM#uQEbbsxH zMr`WQHP*^JK__y^M)wWJiCT3B=$ICRODz9dSc7UN$6C@T(`$WM`hn8I~`X_j?ypE-zCp2?3n4AqM52CLkG0+V8h3z)9K$ig* zU!q3<@kRylL6os|CW-u?r~6_gsOvWX2?zEA$k@zNfMgyQl#(oV4{KgRs|IG9GU1ur z65^-W$HxnU2iRTgeLz7DTvo`)wsSxVd}`}KfEm~jyd z#Z6Fr>oO;t7F5UHaEKOsuSjbJ=eGB7rLsdXESZU&UzBLo&H<)R;q{Gs31ri;Nga=l z05dNz-9aXJk+PIXQX)&o1c z=WeQB{4n?rggPANj4T(EpMzN5w3E8@0=WkHkHc!sGb2QLYu z0Whl$nA&rx!gv?uRS1lqCXom3^m#=?7o2Kv(lmahQ13(=hbOuO_?^US`F^|kv!|z! 
z_ma$cmS+zJ0vkMT_RjDD$yGA)MCbGpyVeAQON=kWvHPT`?An`neloU*NSo7`}J@T5)%gzGK(EhrH3%iqs zYGA_eBG8cFV+-E;>bX~C#cwG5o&ykr%okffHQw(=BLi9=dt#d@JC#b{tf|s)yc&SK zv_M(N#stpWL62N%Pjj?^zJ)`f3e68Mchi3E(EA6Q*0Cf)XweKd<3Y_KHKdc6UbYL$ z@R-`bm~?g%B&-2RUOn$ds2SzLKw@)S{D4p!tUasZWH;eA4_(9gSM#v7kOvZcw23E# zOvqNNeR0WRWC5%s2x2yro4|0lCCo+xuWFb8U}uQvO-plr2uI`1kOMwJp-N*pne;zK z97xB!U%P%Itn_c&GEdJv}|37s!RME<9>W*uT| z@vuKJugM+hdpGXn;`q5DYYs?uP*;&QzqOeaF#ByiDBHcNY~#0g7&^%1i`pp^1`ub% zWrP^ajR`k8gMk?!ya5l#!m_mXw#k>0??&ke;s zD@K#HFlbV^G&UXq-n z7I4MI&oHmAYs5F0Lx3AW75YF#BtDIFNuRabneT~KRn)8Rkv1Js4+sFfDq1u1?8Fo_!ejao0;P!Y7->Xt zA6OK#IwS!3b=&=egw@Micb$E1R4vRGun%B2u+Lvih77*BC&!j&Gw(c6T`IkjZ7H_% zEx{>4UDQwteZ9ON^u_V$y{UcZe3>ZWz08)ZO@4z$6S8gTxY*LMggxmH)m0R?HiZ4< z^vr^E7cowO+|?-vvNpp7X6n=rD=%&py92e_>i9Ehr>1J*FB;T7<7|ZSu~~NK7|O0) z>avsSHIFlI@R31B6(0j5pjxO3uu_Xo>dZQMcO6fWQ+R0JHE+ZE;gKMkM;&pwO(7Dl5!#?Jy-)2HP`%o4!Z4&gjaV*W7y~@-;Ix1 zgT8En$bXVxyNUBo8^Y${q2~6+-p3j6TdIa}-kRmHyT_hav~*FxSy8UA?AEV!&rx3g zR1FTA#xEA?Uox%gTwU(+F_HSB#rxbnfA~TXs42P7<8yt98Q=1=SAO07MgX7NF6C)- zi>Lq7@%Pg)V*QYucHM;o&>?!N0{Ocr*lnCe9yKYJ@7h)|$yR0?`Bo>61)*QkZZYHq zU(P{y0;I|LMfc|8QCQkEMXih6f~e0($lkW^<5owueM~Vrn&ULDDfZ7b3TynYwHX$z ztDF`fXOn*FMkKy-M+*)8&`JgzE~R`w1&gh`Zy&D`&9iFEmWD& z)g~q{*xg-RrcR<`hx>sXMj`Xp{svR2#Rq4j0jPP}raus}!AcE<_n*@(rzRhqwLDaU<)sVGv#^@b@FZts&oeMU%Zb1{)eJFoXlisJ=xj>^we8)ze5$7m z5kG~TxR86cdC>rK3X~%zl!)?tvB-p)#$aDR4$m)Bn5d`PIzLG z0jIi->wu|-DZule(_s5gmHoq}uN(PE{m!d}a{;gQFSk;cI-l*b)&Iwo!GDf`vjE4y zf1m$MMf@Ex_-D-Es0r|n7F4E$Zj=u@CZ(_oM2q%Ci)Z#WCZzD-ceNiXXx>(iNs&uI zw$B%rRc>xB9hf59)j?2))Hhkgb7&rqD&y@JJ@JCFq z3O7?wU{&kQv$6Jl7l<1?;ES>b28v#L#EAk99mISq35bD*M_Nr~uch<8;w5}VV%TU3 zD;geOAy3SZPJdS(Bu;c8uRBtFA3I?$mq|Tfced+7H?6XyOJwi(o5r$})bIA4d@?C$ zxoqurWt9~<`U%cMiH*ns>j}gr@VAeK2o=r2f)aV=2|1$^&bFokxV##kqucIbkC%{@ zSp-PWO1zb_V}E*q*=?jY_gy&Pt^IAXK>CWm%h`p~vgG`xBY(_D0~&J~Fxvj~W=}78 zkPtG#VR5}T<>&~deL*aPiIoHqh$u~#O2_ikNzpz=JzjR(RklK@8*{63=_0gFjBN(% zsLhbjDYTTUBId+k5(Zl|J1kp;uMYv4U?={oOL~$oWVsbCdd#e|3oJ#AW1fdijv+|) zehE>etgc0^%C2zId-eFSmcTb$?%)w&g5bJrj?4`_imrmzW6x+gR^n@hGG?k=)*K$A zcb90MrPpA=#&)rRT!XVmv5NiB4^um${crED9>itEy@$`_>r%&Ji1WS!?i7k2Vz{>8 z<->-rf#gI2=I;+h>;!4pg|lnrrg9O}gm~&k{YOsUCY|RHX|+8EyP0sK#~bZO6eIQF zC$;UtY6!I2uaiRFz#Xmo?1=FRY_+%*VCa$L24ik_S5E|pm&85f%Be&|rRuPM!CB1} z5$Lh|>Ssv%iQP0Dj?p;FhwmkF3kW?x$_e_+T%So<^9!HaW22?|~baQ09F9++P)ZIOi-`;6hmxcuxIJ@oK;uSI+CxIwq|6yI&a5HzZ^m&LibIj7IqVMf$=Ni zHpu`Xa*5*|9n5e#NcL4XV4Q5W);gVQ8Q!;`I@)Pz3`r^Bw>V{g2GZA7tklJID5(lS z^Eryxa!b>UZ^ju@dkg` ztgvH-4%S$WfALi))9IlLvo-N)G}Pf`WFUH7>=&@FN>l+jfzF$f|9>^N8x(^ zt9=$}1u{VUzS|D$VM%5+GzA)O2mL$`-E%szZb9FeGem4E0dv&OoQ3m~3#`&gOP8W& zxCMDu=v9G2fY}!%FOVMwMzF4$?nHydCsY0l|CVFkj(5>Hr{0`=bn*v-{GXI5vPEv%cLF84)QE8_ha>eM&xNNEMobDMNAV7)E!wL#g?|%ET&)HVZtR5 ze=H8I(Q39AnBw||8*!U}IMvWEYBzEAb>N>ptL*Fd#2dTq_JgZV>@K2rTq_0-xb1C&x3#sN`WP%?7-_5}b5mGe5IM39c<2_&oFzfuj9|pg z$g?@DzlP|@SxCrhHjAn&K`u*?AomgmctIhz>G+-XuE+wMYLsY|ZWy_xrP*c4@wS8t zJ8{GV6l*ANr|I^bX&x+8NORfF^&S`$Kjx7_L!-YL9*Zbi67%ob2Be9DKgd|^BEY~H zgY(?yjpB>OS_V*5l0++vf^h8Hhmh^KZj9LF zYYvnLf64v{Z{Rv0-eB9Dp&p+YqVAUu@4Ko>c;2%#qJq36!|0N*$MJ6r!XR`JrOrBb zy4-vFkk%o~iCQT4k8tWs4K1qL;^bbcSN=K&0ZkQsYjC1bs#UrXC2ZJ5Jfjgb5EH8| zfd!Z>>pgGsUy~7yWr!EAp)>EUm*(-qCDU*e8u=DgS_*bnRfLi-zRCIVIfc1Pxev z3YJnc>nx(0^CPP3LgZd=`%^R6%dR=_J38%B;&=pf-WxL5QywMzRv@-x5th^*9Ha^z zeWb$a6~fum_k5p0)zN|zNJ@5NHR5%$(szGifj?;w{#PXcen(C*XvWLDOR=+!yNn># zDL)BeSG76d1FGkW_3T+jN>^VN1@KmjNB*CJ7$7G^0j==@yH+GfEPs=p{a@1R0_;-* zvO;E9=sKxWoPfyM1wcwu!{R``0I`As*Qibqwf9vO!`0(Rv zUu$Fg<0VjhV42KauTc)bA(M?-{`KR?t`fj;F6yL#6nPjRZ>U4*a+4fDS_c693*`PW zN_mM@TiFYO2-OEn^x5%%Xz2w+3osyBsH`&B 
zsQ5)L3q<=Az;rA~%ymA0m?OEHiOCqWx@6`O9jHK&2K9PMp-7BoMn1e@9?+{?4RKNp;yH0zW~Ts4Fi>2ZcCWq?ubeiH$I>Yde{_m5 zn-RPB-uQ4fSajIHY%WLRkTzQbE1$Ddg&%Z*vM6M_iMw zreL%3@Eihw!bin#(+@s**#R-m(??$OFI|UlBF5<=;qP zi%IEblw*NrjVtC->$O$ljjoP<&QMh`O;0suQ^s2AGB@hd-y8;`njBRH)MoFaH}+9G z&L?oG1P$qcz-t27T4jlKUD?ZpaR7+>42c}=I zJqKj2ZfoTsg-WgLFj$ta_%mvA=A`H#PtFv@sC3`TFCCx;zQZg1dW+2g*Qnf#{RjVp z-#4@p9SMu(CK^1(ela?Wk9n?^ zc)Lj!F{1%K@2Tu%O7(6Wos@m&v)E`#-rY&-%Ybrj>Pqr=p$ zDbkolL=zheU*?xQdgmcR06ssh#XINbq>ouc-}5?u$}dU`gZ<1BBseVe5?oB`Tm--4 z_)Dm+5$j1>h9-L+?KbeS32SdjSGg$|)`#BUS&gy#2g=|)xpt<1bwk4(mmI)`wwcA) zK8!En%?RJ3K|+Ej@Kit_>(Y;tQ@(S5<+Az%7}>h%Z1Y>5Hc$NOzwIVb2cL?4Jxs;& z_gIyEkd~>a`uxCj{=esO{yV?(e-4E2y_Z(*y)W}L!dP{xIDR@J9L~`%>J+FR?f#!@|F_BY|C!eLJH_)qE!^K05Fo(6P2&H&Kd?qnU&L`(qkun40_ARm6W3Oh#h&t|#V7>D^A@SX#(^wV+EB`~J)ANrKr{(G1nQ~nhoi@jvVKCDr# zjyr?|w(N;zb!=xn5w>j5kYE#XppY+a_35(7m70*>y($!*FXgRpN+6TPRdzcr1U@2- zqoEv*i!cNQGUl+dGMA-E6R>YuuDV?9ZM@ol?`8mW6$PCl9LF zmrRHIEdY0+?Wi5+Zg?eJMRoLi*JvNFeSCJZGIQ^Z@L*b4yNT(%Q7WV~#DQx8DHwZh zZ@h~fftfx*@5w*Yzo=RBBkJ#VxksJ&7t zYD-^@;ZJP$($dk;iUlEf4AsOW+gC14`}gB@vx(;EXDBKwYG#jLjp&sQ}q zEaUAMNE5B6Pg#liVrTkAzD6EjF;d&u?PDdOODyaM4CT{Cux4sIXEZs8zKvUPf}!4L z=f`Jbz^agk$~|o}JVML3UDLH!C6tth6BE;n2XDPn-VCbA&%!9*8PZgtl=NbCT;E#} zOm(Z#GCG&0%A=@8-^3$~DkGPgF(y6|;~!{9fBabL9Xd^8;2{`B?+WH^6C*T}+`~IP zZERm7Vek;Go$rD9NjpK_s4Wf|o!kmeX+^E9?P*j0B~NJe^I$3@4K@>RDgS|?@}2lx z{GNliNtUs>K##Km+dUur7@ z9w$fR;(!?}g*L(U8Nh`B)*g_(U`-4ApPDN1utfYYgxL?Mx8;$9>p6D7zZB(f{-EGi z`bLjkJ=)1VN*-fO73{n@3wT90pJdBe;a7rsQSIm+XNnRuUKk&o{Az?|CFWnjx?7H< zj{Wp{++RRRAbPHy>R!*ivG7%>CW3Ak$=p)yd``7^9U2siRdTxg!CJc?u(bE z>_Wr!_i)*lQAT2IG2XAGpVKJj2-F3z^SGY7-&#(MGv@4Q8i#U11u%Hv8+uG%@gDaD zO5d$ZM{w~Y=&ZOu6$C9_gzA3;Mk}c^NYmFcYtd8X!jrjf2+qDP|9an|U_*O*&ONR~ zm7&4Z;j|K7skVB>znjQfRX8VL?Vyaz|BI|JhSqWQon*n7j}!0>T=1rtYbZ$trGEME z7(h>ww`4NI;|k|becENGr(4XIOT3ba2J!}D5Fl*Bb*T|Mhy8f_*hM79Iu7%!x2C@b z(pHD?;2I3i|NEuI0tuX*nbyOzlkm1Y^?6*?r(P@p|M`dZHxDW6LppC&cVYs&Cz^#T zQ!O8L|K!~xW<}IZm>+h#zo8Ag=`~MQkAq@jI?QR1ody32*+xBv3C;uTjTcD^k}YrDa(GaBq5Yqi(a8QvDCx^kPX>cW;PLc2_|>&?mYjuRp3iXG!Ki$(`R6u;6DGz_)CjE5Qslzucvhn_UrM$a~Z1`lF zvkQ}Ia|0HMS zS-xonN_^Kr?eo#C=`hUTj`w1ciCCDi5x4l%FdrtC;X%?*rNXM8-@n4g3iL+ z^~kzS20So_#8j4BmBV2O$(Q6?u|qZQ^2xY;T|r8~2LG|zrvOvDH32(;66C#do>PA5 zEo7VHu@}VK>uVF6_1>r~74wT!g^5%2H+N_MKdg$#+0D&wsUt zm~7<{KL2Z+=e{0f|5L4#D0PoI$&7uimbL-7u7koL3^mu%*vo-8UhI_ZW=)LDaeUx^ zy&D!kMsvmr`3$QQjZ*ixEUO>um&oGx7V4Cu7eH z6Y%Q|t|t%rXxkA1g$zde4?pv=ppv}~iM(pU;y0*}O0g>{l+PJ#;?ylD&|9UEC6$wW z{g&%1S(GZ`_`p zK26jyAKjm3xC0*Ie+{ce?f#oe*POLT!{tx(Muz94Qd$Z|rT)Tf{BT>JHYI_SGBe-Y zJl|O z49MD?SsDMi;1n4Jsd4r;)-==g$^*Vy=jaP(kTYShydT4Qs2qd%H?Y$d%f&5F6!WNL zY1(Z<^c;jRS!`ZZd0Z;Si+3=2X+EvZ*0gC?WIr@~WX#_vV!{5g&ME`kf9a2x+Nps! 
z(tYsMdnN%{RObNZ%3rX?V2F*p+w&fQ58*1O zO^?NA$-^VAUfdJ0mgk~I1zoj~=t|j`Dg#QpB|`#Gn(IqVL>J#|{Ix;WyQqbGWB^9% zs>f54aLGU;!HI0d=NO?H)7Qm21+a8!-fKMF0|E<6Yq^bZ_ZFHlYNHO&hszCKRIF?{ zt3LjSB&#}G1^6BCBb3sWs416iT%AWAzEyAm}CzA=ek1Qv0z#R=UFklkr7 zqOxPv2{=T<$dAzfy4D}0|K#RZy3+=V2(shT8g0qQWn;umZsezi6y!Fw!lqW=v?w6* zAum7%skE8gur8>gs7<_SZ!cA~2QTPJ7tylseg7^(V7qtc;~}kXN-~8E@L$KCvCZfi zc!Pvoz;?iZpal7>Oz4#FJ~yJUf}g=Q%*ovyd#eZbySARuL8jHI%sOjn3o9c%NY?aA z@jj$apNLR0O9&?=G-ePHK0d2qo5CT9&ik(QzZFL zArG~W;dS_gWl&PG-tc&tun-PO3J(MP(+K7mt9)N&15_PIwL6(l zCG{&;1Whnz!{5VrVIX^H>r}cg+RaFuJY&1AK z?x<$H8E};E*2EA*;e3~>;n@Kv53??1{qS)<6D-lFNT<;o(2hjB`nY|uTZse%l#q$H zo+X%WXp1dRBg?9Mm2kI?O4lC=gr`7yFm4#!6f}#=Uk4a-IIwcmf?AR7fbS}DIZ(g} zADISMveD1CgT1i}19A;ir%8!q*RH_PEZC)jraM1wH!dLD{GorWQ^{e;Xb3)Vgq&^Y zo)E_@b&DUkiU{c_g8BLE(!zAHgud zoZ|~cU)ao6Cx8xYYC}fXv18$ey-xS8O!Kec}y~?x<3|yY7HSft^*(r zC_XE77n&%4feAcPXk?Fi#@#t;_I&j9yNWivgmD|Bf~*BUzB~X=an_pdy84#BPEHiz z&FkB2{C?w+kqx4-#ooHSlxE|8#Q&n$+`40VZM1qhpOMqcbG%s2o#g!Ez&E_+Sg2<{ z-?rS)sX0CkIrY#Q=)wE;`EE^x%~mE5>rwG_T^#z6*4uZH^eKRt)o+B~UhT3sW$T4w zcCL>xcJ;F9nr$9z4V@rk`h|WkyvHii(@DYoOD;+@!OkCW7t% zl`A4t5N=KAbOK8u$@*vspA|Npy(8&_G@;&FX)<)&JZ8Z^kQa^Y8`vu6|W2b0km| znVRPKeba`{*T7?+Uq`K?;wiA>-%p|c_R{}c&i-FW{HM|Xe|h!49|fpvP8OHHDpq0h zk)5e1bWy8nl;X~mEiU>_k!F8s(2GNIckrl3SEtg%pt^vUC*CfO&QmEqVCBBVoSq6$ zT>RV9|IdZ>|Frf0`FeXUQ0j$ZTeFPWRd;2V5Oa^ZUby+^p1^BWmEmg3Y2^%~f<8Nv zLW*dx(ys>?$YID?7=Pz5e^O$8;*Mc&ApiM1X2$}Yfwx$7ZO^dI^|I4^4QBO8Sg7}$ zYVjL%L5qcvr5My<0OHU5(?ziUTIB!%wi&C7=w2nlzIy(S+#kv5N;uj_s1syc{JSCy z!K%!B`Jg?Av|Xrm*-7>fLSAGQt29HTUwzGkp(IxyHl0I5k!pITfH^& zpoeBYtBH|`V*A-gJDrip6iR3A|2P>!>sD_f2a9UXnvgm>6?TNBc+ud>#0gWUXRiPg zI)*q2humFlhCPi}mL6^UXOuSuWY)8Y%UXE>NV|*eG!WLXi)!*<$HgV$s6#_RLrT9= zdz{Fk0<~@Rixq|S6$=IJ@ro!(aGYAphM8rH#0y7cX6*p)H~sd4Bq{ilM%zS85BgHWEsRLu&2@mFY)~R>2@e%N1k-%N9v|fZ5^6~BK>@J{Hl~xLd%EqPu(r*lF z0Xlv1Er4q}$s4OVWgIe)uIy!5igEeZ_%cSq)|7CGg7`B*Sq3t)O)9is9=-{_0X2~sS}Jx`P7wgU7mJt!SKf+@ z(|=uIQf>#RC;Wh$sA80og7c61^MwDO0C!ZnuO;ZiH`F-D#K<~^5ZQ{d?eSk|NWV=4 zh@PpsNjTrBlj3j@(5u1|HFOZ<6J6uamZe z?X$Sf=UU?`t~c_PQ24}COS@8gNO<<=GB&~oQnHn$bg^M-ciUcg%6u?*58D@c-gd*h)%C9I_Fl0mHfa<#wH2LgGlXl; z^8vfct-(SYRG+(9a}i#A2q)PgV<4&B8O1W_W4ZVCAme85w}IV?`q|~38hmTL+Kw1o zJ~J<+Vy9ZNWu4afrw?j4-kmSV=c#aWBy$LNw)%ecR>s7`_-6UwF$ZPMf%nZON7QjO z9ojWNd|9!>y&1e!J`b5}OWU>Z`#SgWLwStQ_U^Irey)}e23G`TEDdk z%CIN6xRdXf>=Q!&N0oF8pVnJ8aI`i^Ula zGoQYk6%8vx)^HMN9sMQDCcp4{5f5SEd9UM6d*v_-LX=Xg8@i`7-tqaWO9I-S(>;pJ z$#Da&Zx`r2GTOG>du4O|eod34QdKSkckvG~`iyiB_9i8!G+u1E$>YOPOYEpHqoCRN8JMD70!7Wz#dDV(BFaBo>J z%ehtT(voiSAuRH5Ck1(XdljzXx#Oh0DglE1%5x7*9d!{Az%l7MS4D@C@UV2g1aqlR zp~PsaLE=mfYf0GD22yG(;W=H&iqnI&bXU7s`I8_f%f04~pgcNB)U_`jlL!7y{9wMvF*ciP`9{aEn@Ia9rvp_vJUal2;U|ZC) zlslqDhRZ2$RZL5LJC!6XzwUOU`L#{-GGC$+mVTuC{ZIH^S{bn*7j;i%3e%+0W9A%C z?7h@SNXeNz%~wR?|p} zwUq7Az^d~5hu5vfm?mZP4z?6twyY@8Hvd-9D27m?I?W5_lV0-Av6i-5#;a$$Y zJ*0Jc)wq_E7d(3FJ%c&3?Z^7oCrN>(3^@F)`JTt#&nx>I0@Vmkn4R@y*qG}ZyYIG& z#^($8=NH+Qiw3 zL6Jd3I-IX^lThUEHg2C!T&@@b;M@|Do`s<7)m|_OY@>Ljp?drA}+7L)#>IE^ELYNC7SZGW>*aJ=Wlqd4J6jkT!R zMGH;|9h1NY9F5$}Ut?Hl9U`|m?I`q_t?uzXzk84!>r!D}e$qNUw+sK>KjLs*wCsgv zM`MlEnKfw-CIeqk&&@ARa$KxvdV#EJnI1*kUH6)aI=wf$n8M#^rYfwh+;{8^qkd7w zqN(n(j@pxbt(1@_<)y;Y_wtp~C9bR@;EV}(LzPWz4|}Xc;*`!abH_;RnQo)dX6-M>|c!OGu}r6ASOr@DZ2!=Oo^ zNj?O8@is73@8>Mdtlo`@vFW$5mw;e98@}4ac4r^>{b(YiYPe=}-Y4L_yBSaZXP^DE zZ{FV(B{EubKSS^B^lgPKZ+6nI<}t`L^DiB7J4M4zZmXncX?z#=YhDghjziQd9H_8} zOGKzJ9Vs*|-Ls$iToYm&_8~F@KdpLYY;7&WEZIKctM)-i;xXp9UJd$`CQXts+=Y60 z@CO0S94C#K4yF6aqqaD%;bPNq z5xY-QyLU|OKl&6EO^!A6_68NFWn?8FcgsrQTw-o@V}LY^=`p zpA=1+{k4hiI4U)z=T1K85uUrEbBpQ4#~Q623(Jg?H%BnuH*bEwYbY?DME^mOS1*l$ 
zWuak#$}7NkgXsE=kY-uZ{?RiXDZ2I{=kT-0yM%GY3XjA*-jZ#NsdhxYg1%75rA=ik z7FLFBA+HYYW|OCE!lIU|gEGwn$u<}p2OKFC8MW2phgnAa`cg5Fj?dEoIf{?6CqTO0Bf*HKp=U`}CpIF06IO_TzbT`WVPd0)cTqRvbdK}0ICPKu!S$!M(X)@%9foU2QgU{yaPATHK0A0_YD3Gi z+*>jfu-pnQdyM>sXgq8BZ?av&e8|LQL7hZnB78q<@+;A3?EY}VMcNuRUfR0 z@59T?N0cww;%|*VV?gkAm-DDO97Jt32N20pcqRR6dOp28_}Mw>5Yx1_pWj4noj;53 zP{pnIw6HWFs`wuFjW=#fk5(bXik^IC(7Nt5Yq`GSD45Y;VUjewOhMs&2IXfl@1)oM zQyhKLizAiVdy1P1ox0^!SLtgn`L$b5l1?8-c9$Hiw;NnKl|>yU7&2*QD@w zcj8fgI4A!yHe_2LQmN>Bk%_9Q$l5% z)bi_-O-EeadP}~g-xIYaL*gpYgI)WZ?n3Jby2|rQ;JW2c#xmYp>&U^&9RB$08}5*4 zwsOx$mUQ2087`n|vPCPd`IOY-7aybYs#NqGTc?p0#oVkFtZjeW?7a%gOyjLk2LAX^ zGPwpxC`mnFE;Y~@3}B}^P+EZyV__Y-h14dk47o@s5z|!6mA*M&5KaQeqLHA4zxWtQ z=V?Gq(i=KBsF%TO6SW}s!*q$|_80a7|lcNp?Gu@+or-UP=9{{)4-i-S*kML1-Z7%kpctnhrLj?ku~*kW_oaC@gW%6ZKq@4MLA zFx)B8BLo^pq4YIwIDw(yI_hHPGlNs7&^VJmU3l4tMgxtiH4p-VYoQ@j5kSFpujn#Qj zQe8D=9Jmyu2ykgJNV|xo8a7~8jw#k<-Z?|)s+x85e6mzJqH_ii0?gstpE%#AkZ<8R zZ=4^oR*7F9HyFX)5TWQ1t$KjEnET8GB-bS_jg1BdRmB&$T&2eN;kqaR$Yctpk^Af| zA307NDY`&COL||?`Z`Lx{RiqRlV4|c=Xa5Fx~=sT{LmQPUq@t8@gYLF4)D~6mtDIk zIFT@;*1{y2i-JQ#zBuA2nYC(~UBb>=(KJbaonUu?xR0c^r_WDLPp;c7;7P)peJ}hFk4dJ5Axb)>ImoLD$UpA2eU9OX#Zuxg*djCFr%)> z+zztQPl#G1t$;6ZxMEW9wdH5!I;GYC?J|Km!UzE@Q>@4z9~+#)ypJUKrutxkwlhsX zceZ>Q@uwVUS-wK|CSi>rQ!f3AGJ*(nGH6{KF>B~Wpc{1@naz7#mR2KZ9eQCZF<%MS zsT1R$Dp-q+s64S&dR3zV_JN3?!_I;0@?6y&hZ4{&cTtxCv4uyKDfqxwa_P;>Dq4&E zGZAK#d_j+>TtKYGfA|NmaJ*z;0ZWMBNeeC+djA52fJux3nsjdm2>U(7x1*qKrrn0Ijb_KGY>>K`%tP-6!KJ^|x zIpY@aI&_n=?zj$DlPpp{`SHVF;djTS$%I#fMF#NtOs*s6^`xT~t<cj04!f^r(!`#{@1#;q`_0?e8erD8-g&!B z<<9PMsVUZ#rfi|=Z++?ydVFSz`r6{A3d=gIpVg3|>heJ=ZFf&QmV>bXx>1e zEaAbwOd7b`%*2s4QK!Xl<7VYB=QQC6dG)edg_EW~CA4STh{~^@SmL|CLq_MF7Xh(q zZ+lZl4kb7u{tbd%;Kx#fmu=w1?B|)PBl2C+n{gk=!08jsf4PizG zRkWet1=4iHGl}XRy1DaMWtVyT>-^m;MN&DwlmdgVd^a^PxW(|eJ)&c-Rc*M2eajz2 zy7xL-V#Q-4fmE{Sdkyhlu%&{KPLyWt0I;R++0c>8u4dfu>`~h<;^P3nVEP`1L@;be z&n$6$!W-689#G>cj;^H!2##DL$PN%})8wi_ASiF$OKja`Q`Pa7#Ws8M2uU?Gh`sv7 z#}1^&Y@v^*hIrUVDuaruwmPv=Y%h`$@(#wYWdRuJy8L%-+pfxC6- z@$OQlqGZCz?a|x;vpq6qEkWG5u5c=x>OU4{fEWP@-KI%o=Yn77ADsD4HICaOF1I1J zi4I9XZ@Gw$;}%C(LX^SxAUi?mM{)_`-p>?sri_+-%?xXEj4Fef4PO;7>6&o*c&S%= zMQ0R*`|QxUT-vzjfb8AO$qklRySRgKG^KT9!qJy89)|e9*J~f1^r^wi9swsOGZ^!+ z0f_yhev9z6jVJ~$-H)^G2|P}072ut|g?YqAZokDSh0ke=+Ap>h1`(|?KBOMBL%-uP zbRhv-P((U67suuc&F>Z1|J!*%};_~YPMQmY_Cc`N+ zy`Kc}RoteS%auMoS5p2|Ikv@jLm(?=ex0el;_`?qY=~tz;o|Y>mPg@j?|Sg82mU}u zzIhP}A-gn51DJ%|FZRW_BhZuZ@R`XTkcQ0wq#eCYcLOraj_b%cR{L(ZfqgSO`h@x6bmG zxU=%x(j~s9S#61-l)Q4_5wbQWN)lxMBT`UP1sm$Y=eR=XY7=!OPv}n#AN!#)Y?uZl zMuo&t@=cE@w)tDeQR2()XT=9+&E1u(*Xt5=<5cwWQY4gJ8BlnVM?O)yGa&ZjC-3`| zlk-(dvdMth^taA#_nuu#zhzuDU?wn$yk=vfY=rY~JVbpAGtzQ@vkda!SLVWfTmb;% z%v**N%g!jG>$p#^wKXe!@%B@kZdXpBmHv8vBSyMZkuT9(rOC#ePyWHL($BJQbGkAH zCg(mEyIZI7QMyPwJJ!fsWDi=HR?X zJW==4lO&9M&+Z(BwEIa<%=wP2DpxA{%%RAjHdVadc5uWZ1}Pxcwn>B(biG(TpRYkGaRSdQ4Rc;5ydxWw(J1|YpL zb)Ey*5sPr+7FVI*rQgym8#8m`x!;;P@%*51>$vz9OM?c&6B`0dU6n?icf@Sb(A;b_ zKk2w(bT=y*BJH}wv0(jZ)Gx`1VynmMJ$gewW<~puIX~8e|7%uNd-3S0{R3(H$7Ro4}GKyfmMLYNTKD3=hi(sXz822X#j{e@?m1vS7*c zHb;?4RHP69|HVvlg^6Kw+>r9+74TSRuN31*6Zi*QeQvz5Q3U1@3m&oAUb{xaYzu>> zo``c8G0?Nhmv%`?i!-%Rsld)>M#F6|^m@+hyUppIv3FqgU#avNs3bI&G}4>GN{G+wbEL{|C~#hTJ)94Ze#^PzSMVZLSaHJX!3X4=C43^H zPmQDR)m)~>H6*{*F}+Cb&xv-eh~uY~IVwn{*>S{@IUZq57_MlW)EvsgKqs+i>l<7^ z^YVP0n?iJEZ z;zg=dgPtNKR~F^5q@S@Sl0u||i09%dF^$A9l6Tb#F5zlWVCZF)d3Q^{Y|0FW8p~JG z{9gRfQs_^3j@pJeq-6QK>y#hc$)KdvVS^}iZ9-UsQUcpTR1>$3YlagZE7qo(F` zm4xJ1$=Y+Z|BVC1M&`(#$Eo#sjfD+ip0#|(TmAZps1hS-fc@m0&UjWsf~Q(2KOFD!#} zaE16)_TUMqCDizaP z_fl_B6fqsZro@8y1^Y(}%Mw3T>+{OTL%KEw>_SdIe(6)ek{iW|#`lOT3*&x$ 
zaz-m8DmJ26ORZ3LSDF*8Yi>15^~2()6B{&%71XYiq=GQvuEc9APuUwJ^zrZ!RG7u@ z+=Calx7C>^y=RX?xm5?K-rZVbFe0S)!i&c9h$DuG3XLcx)}Dw$IG10G@>@ac__=Bs z#Z0CGjEyJ!pmkIK9usiU6Z5A5MaN+xpow1p_elR}OG58Sv_SswS%sQiM6_e`QM8U; z#>c~dj~BQ&s}}LR$i<)Yo%FKC!kuru*~N3!0$IhjG8ALBe7O(Pb=x&5hT5NXvWy_* zAe;QGC)cJ{CfBS~jiMu7uO=HCiXrKTe!>05FFG*zMIvSbzuxVwbHhX;g3xsv(NFbC9o`0MpZfeW1>@esQ%G7+}Rh4!p^nrxt z4Ib~+o1*2PrGyWZH)zgNx1iEmX**8H{ZJKY3^)^6vdb1tP{VnCj6x!>SP%;BE%IJ# zp=9ZN8dgm5=IL-6yGn6M%7kwguODG}ui(mm2>f!gO zm_5ekfvI#hKor-2UvBf_JwsjDO%1+n(Epc-37LaEQGWo1fe1Mc&1adPK}|bLML3>? zXFgCq@GgXvC2p~l@XEpje=DZafM?tm2n+2J7-nZ@WP#h?>^MbA0P}!1)vdl(?iG zuA~_vqAu4lEODMSthBgL1qmL)DM%9t={4GPibb>ZKoC~DI4V*AOraxzhkJ7U`yf7d zui~?xCOG=KH*$eP>u_{0>JeuJ4S{(1gp@F!5`pu!leB*pYv-|m)G~}I3$S5(EqWPb zWx%vv;5?iZe+Q=efM+1}f`Mrzv?vz=9kb&k-OsGd_dhfYsf!HP6aC#UXoSn-{*xy7 zhVZdK?5`V_nWMTL`@^Ox?M=`S{x_f)=k2dLZZWEk#8$I^Tr~h(1)8a!N76@20_?N_ zc1-7;PpS-5yJ=UMv4hW&NL}WOg%t{FPf25f{utAdaTK|5>8cp{a2G^0%H0;J&yQM& zjTd{hYGW;ZmPCf~g&0AH&AqMC%Xtrt9G>rW~3pB_x8lPQeOe{BxYVYPUJa6}p@xGMO*LLJ9@>r^ElM zM<+0+`5#0|$W+F|0p}XX??E~T4)$yW=*|BLcj#p#0lfc&I}p}Me*?q>ahIz#von%A z#`yCMN39Eq4Y!)7TUxMzP>`UCkIfsZ4^URk#9~o=5dne^%9lngIrEoA5LaU-6>gdI zMtkFvMNH6CqC)?H1zX+bz~MU%>a>05jN^L%<9TFq@e$11yE=D>NW z5_GfT=bw4sogO2`Curt81QGm{pf^pUE=UG0^aYo?0hjoTOP^`xYJ?E8ynh3IZy`i9 z@5y_Wy+$ba^Mt<1^y?PTqyq4$yRab%DEyEclyXs$pK{0!Yap>@IM z3GI>XdFRh$_)nb@cJ)+;p68ZP@vkLwKj42K@N;1DU`AA&?{m>weggrTHn|2K=t?ND#+#+i~%ehT2QI4?&ETiUl?65aNXjDqSiz zG|^{3&?pm~?d>Zjw<+}rE}0=>kB!A5vOmB5Z-5C*;xAPn3`_D<>MJ_L0DJCvJ#k0kaT2C#RT2^9(wO}lnRJOWBH59pjZZ2iUjwtf>OH0cUGdv4zThO0xp!E>)z-MyMGHJ-!Ie4<+d)m&PF_ zhA7dgy-R=@8Nj-`;#V%v$3Ash%;0$irmzGo#$4Dx3^rMN%!GspV{2VkF1G7g_*d_N z=#4v12tn4zV+$31W_arK2od0yVPyyc{4-PfmrJnKk$R-^Vwk^y%{ajGBCHcAszy(vgs7BWb+db@^jY?3s?>|fDjqc94zS^lHql2+jB3y$u zl~}QMk3ro;=uGd>>}cG0D)NOCrdq<+t;(x^d!JIJ1buUB@21nzttw+?7ulPYEk?h2=hszA4=g?*H3?DE6K*sHuq6SqApDLv};; zD0DTcl&22qn<|3$6BGSC7oTfqG{m!$wr;le^rNXV6&1vD7a#l*Eb~c!P0G31mh`M_ z%SUZ*R5w)DJgu72Sks@0CYhbQ4@T@+AEIsGz-#h4o_Y=%Pp!fBQ4T6Pp4KAX+X*w> z&R@J5xT_YrVeD%Y0yMVSTd;;9LpW73BoB-_p7H~=Dd;y>LO;}-rQeGBz40E1$>m4x zXr>quDmv~*pkTz~)@f_*lKa!r^a6Iwz9LE04HFf8$jSlumYJ zo9*cAq~ij2v6-|soV(5Y8*lrIWoAKVE-|tr>|S+SuBbs4MZj5e=qitE2{&eMtIAp$ z(p|HX-KAZCRXnSa7X1!}n`<9>XVl_~>?iLkyZ4dKPr3k{G~VIzfiD%Cz^fPNJ@Dyi z;ML0};!{}Q55!ASAU<eR6G_p%qC4nN0$X3gB!;`9eV%%H^cwB z^yilO>R$4Xtp9VCDv!1v{6yaqHs83C_NL^1=G423JOYA8$P2fUq3k~MYioXmf1h{J zM|hSobBZ^1QuYm|^-L{kTSH-a?T9k&^7)+vbNI>Tfm^;Q(aFMtW*_{$!l%`2LR!&Y ztzYeqQ@{FfUYq;@Mki#lmU|g&)@-SlVmqERw=Fyn`-Mgm70Y`Nm=w6NDWt`@mZi>` z3@-m@7Y%+*!ep~X$9b%e6R@YyglhHmTe5e+&p@Rva5?CSnY6Is+q5G2Q@!f}dzfI@ z9t=z2hbnae0jofOdo`Dk7XGu=uZG8|^1m2qjePn?hH0=f)R=%`-f!hAb3(h4y+7lK zleEu>A_EPl-16VS)Q^36z~zy4(a2jI5Bf*wO}8t_U`sc>usX-y@gD4l{aTJ!=pV_U zUeefj{LnR@dM7x#V_P8lv$EvjL0S>%fqLm1S#bG)E80RHC!Y5poYc286!J{g6!}BN z@`L8WFpE5fINk$U{5=Jc3j>;FEjiBn=ywTX{KRb5B8DrCm3V?zX2S-S~Pngo6v zkjeUMf(Mv7fg`?jDDzyU-GmMpS_KTv43oYv6j`*Fe)X}UYyP`gG#Z2<5x|g*3q#Z| z3<0jr_6pRCNh@Oh*I9pSitqDQ{#7xnb80U zw;oc%q2m>BR2Sv)yC5izDo9{UvB26dz@^#Lk#FLFr!9fC1sWb!;K_xHsKfpn(@-Tv z#AWvLGl3rQTY8fV)WC$VfFWD?)M}h_%igHeSqU|1NOYu z8ENa@RTy=SCtN8b(?2rb&lPPC6BfzShAIKfl9K|bpY0l%Ih~y*IGUZ5Mfdlil}76+ zoz;o&F9xsaCVYdHMSp&9tP)7B771eqK@$`yqJ zwZ6B1l5)G5cY6wQ(9s#`SLObu_%VF!=nn6)#A>Ft>{(rlmNX2i%&0?BS`tQhtaeZp zur5Luz6wKFU?V*WKJsRr0%aI|Pq9*pPxOV-K9Le53>Q>A($yINZr-;@a~k=cSK6)O;(KhP4;S2Ks2 zf%#`3>vBL2$_@r5{}~`=f4Er!x|9^*cHq9{UMq8OT2WF2%qUOE*>i{;3~0a+{iOF= zgxN0E(?FSo~`5Qra*#cY~$hegz58fzX$s6BNT! 
zClhf&kyRbgMfx1EE_|@fO>48p2wHja;%2S}asz5W8&C4=yk!4~GME@wT?FJK6pN58 zMQFy;x*P=RS^<1)F{CQq^buI;7RrITNa#xoWIRO3xF@oj07wHZK&stq%}{EIIV+H; z5CX+)72Q#5-+}vQ4WQTt+xre%u4^%f zsM7PRV9Ikl}R%0*%}1d0qdm9$>Iw(BQz!90|%96p`rl`EjS}Wa2qg*C@R}E)>T3o z5EO@Hj#uiLBB`#Q(gcGwIWyqpYR=5NXVmak$jNuC%YT3_0L$NhWWEo6>zL`IjQ@HR zTBO+r>1KwnjpVniCx(WNZ6c7A9qI;zam>c^L~mzKvop_CXmsg_j^|a}2zIfS z2BO5T1B&|{NV>j!*GvrC3EQ;{$8p5&YYy|h4mv60#Qrj~f0J0c{fEkDzOUkC5u`U{ znUjf7jCV#+-EDEzt}A;N-lg>`2<37k9v4IhItgwc4`S09W9qa8_TtBVWMq>6AfAou z+@TjgkjE#=%`oo1yeIupe$DIir2srp|7_`%9Hg~nlLp?EdXtv+00$i^k> z(6jWgeWm!4>f;(tjJ0sN3p!xw;=B`2Y#1EImaLN5EY#YmC`-u|S;3U0Z12Lspt9dX zC+PwsejP=1?GMkXknfO}VYXE2wVi)WR>0|s6)vYG`hHwE&lUzbA0rKV3t63KbH6Kl zLlc7@VL59{O5okHha3zCx_ocKdwJT|>wB=cAO{iQC#mA)(izy;SxG7g)ANq)kQ=LC z)W#r@=d=ZVes4N%0%=hVJSkI??VZ_oAud)me*Z{?b}CTgB5-7k!kM%9h$argY04{5 z%>CDKo=4G?-{65Z2}3JKOf+XuFKp+PeB73jPV(XB6G@YUnz7R$LNGNdBr2nLprq8~ zwD;Fu;Mp9RWOb0@&L}3Piawzeov)#`Q=Xvz94ZLLU$eh#*xu;hc)c8!|CJ#G_fkB{ z=iZI#0n_NuP8AwQ|4ge^cn?dLszh#zy4K_#&Tmd+)`!H+E5fl)A`1U`5=H;ee?4Rl zup3EqjT2Nxb@PIz;|-#}{8d8!L9O@t6T0rigUNjY+Q-VLuMl&2dG>$S@7$r1(8%J| z)Mwok{G}FjCr76GTF$W@`gZbML`3ByzlpQ@*P^+~NyhUJfVS6ZEvXRmW2-Za3Up}o z2yg?x2dew>X&vr>`Ew3iKOQsXC6>D&f=c7l4k2-|H06Cx@+R`ML<*$3i{LX?(WfV9 z-@$%G^27xbOso6^%g3)6Aeqa-qceLK@6Lnt1Vw*mw0B02z8K41)%f=Aecq4R&6?MO?@H zt#8fonT-}%K0jKwfp?2HHJ(yoZ2o_qW%%bOn?Fa;U+vBQ^YP+&v;W#czC6U?zrT*I z{=F%n&GJ49GhgE1m{eFqcx*_k*iPWhpmZkv*T2M+%Am8$JnYtt5 z(c>q5&v#<>GUhRuD`$k@wkHff+>#zduj{>F%3Qk1u^E`+gEF3GdGP!HZSnT!JMh0% R`h(d2t?>V!QQ{7-{|8DPdy4=7 literal 0 HcmV?d00001 diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index ceeea64fca..17b3f5caa0 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -16,7 +16,7 @@ import CDemo from "./_sub_c.mdx"; TDengine provides data subscription and consumption interfaces similar to message queue products. These interfaces make it easier for applications to obtain data written to TDengine either in real time and to process data in the order that events occurred. This simplifies your time-series data processing systems and reduces your costs because it is no longer necessary to deploy a message queue product such as Kafka. -To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, standard table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications. +To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. 
The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications. By subscribing to a topic, a consumer can obtain the latest data in that topic in real time. Multiple consumers can be formed into a consumer group that consumes messages together. Consumer groups enable faster speed through multi-threaded, distributed data consumption. Note that consumers in different groups that are subscribed to the same topic do not consume messages together. A single consumer can subscribe to multiple topics. If the data in a supertable is sharded across multiple vnodes, consumer groups can consume it much more efficiently than single consumers. TDengine also includes an acknowledgement mechanism that ensures at-least-once delivery in complicated environments where machines may crash or restart. diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md index c6d83ce4c3..a433e064a1 100644 --- a/docs/zh/04-concept/index.md +++ b/docs/zh/04-concept/index.md @@ -131,6 +131,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。 + ## 超级表 (STable) 由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。 @@ -158,6 +159,8 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。 +为了更好地理解超级与子表的关系,可以参考 [智能电表数据模型示意图](supertable.webp) + ## 库 (database) 库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。 diff --git a/docs/zh/04-concept/supertable.webp b/docs/zh/04-concept/supertable.webp new file mode 100644 index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37 GIT binary patch literal 33420 zcmb@tV{~R+*De^_NyWBp+eXEzIH}mSZCjP3V%xT@ifya!dfw;lug^I>M*ry_dyHg{ zdre#uYtFUjQkD`I4+;PRQWq0e{I19;>jwCnL4CXt_=|#6^7_l|3{1A?oubK;M97?U zG#*X!U30pP1Bmzs$o+Wq7zh8#1&96`#N^NV6kPrlvHH=a*~Js^-rBlj3bnHPe8ese zBXeONV>1$MGB1f@>11hzd$-qBUZldIT;Vh)(#%N zGc`O(_XoYUPWMaXdzx8QB_(?OR>^P0h04xu_c<72{q1GA%hbAOwR=mO3OHxlU2dm} zT&kBzMxNIKe=u$Yt=6$yXPZXlafW|ZJGB@!_v44ytr?n%^JE#qi3k{)dQiggO&A{j zmQ72y=$X*4N>**2x@%KQ(4jl&x(xgD^fQ)8AeBl8yPIkN`-F^yHUE`;`JR+}wbn!{ zl|V}Ki5vF}PsnQ^iHQ<3K%O_@TEblI9Tq;dYrx$wN&I>RoIna#KU(nVYK9(G7UuRv zQA*+`m(FMKRk|kqgj0jkPZ3cNpRg|v#H6NZQ@J1?iCZr8Y)DPG91EB%jCiC z)99=AYsL3m;E~`h_G9?}&wuyc{||C1Cxb?*<@~B!IH~29hQU*L=NxNUUtox)>)Zkt z^cT^){xS5w8Ox?1yBavSK^~@jZT`Q6y8Ex#Qed%8a7&B12Zv6Uho^#Sw)=-pP-OO( z?XeqJYE8ZK`Z<$XyzWHYRn;D3&K^?vC2>>jEF}7{J2#e%G#KSjyunvfLA0@VTE#RL zPtdvLi#((_}|>$}K@Q^2^OpQk_}E*7yZl z2<(Xx+TQ%bl8T+d6s8yC+q50P-fVhxR!P0~Kh%@sYo7<_g$iDDZVQH_gbLih-{yZw z3e;O~JELIDe*NTe2%K2V=AmzqIc?R|{*oe1bo#OWnMo>Xa}CPm>8jWJ(Rd~p;vahw zckW8X@x$ecHyeE+`iNAQ>UWiPt@53thTQ#b`8UfL^_B01u11t%HbIzs?U$Ixo>H|61-TqZD<{ zszGp$qAq*&RvwDDx9WX|7>@6Q9wo0UYP!ivHo3L2r7*hE1*^6xIKE5$Xx}@r8={@_ z)>b%|5YK7JYgFAJ`}C8+b8^BKueHPcq8vk!f6HvLcBo({I}PLG(YF{v&(!8lxm27V=9`~#$MdwdODoDjS4iD1p@d3RUlSYg#3N4N(b#m)+xPv?;&#+ZwqXjESWZe(!WI;d!S|Yb%bnPGZyMm(x zvDxxzD-agD{yE9lGhCg9DfVnbjsjp*W}Hm?XQ3~N(B{sYTM5Tq_>a|*Zt)Ji*w7Ht z%FC)&rjk0dikc_6@OQOotF-eLDHQHM*%;>BQK*gTv-cH{3%P7*?YoFG&vqLpJsi0DngjDY0)-4dAPgwsF)&tx8 z5ir=96JTpe{55mjU=`(p#1FZsHmWm9fzXFLs!nNQ`x1YXfTcPvA#-ZZXdlS)6o(r- zVov^o9IqtG7-|9lp+^)TBDoAT9xVFPYxmrdsz}Hw&n3DeQ;h#5WrAVC;@T|ewpjY* 
zbNtIrE%4$kNrEu8?1ll*Y{{@QstI_ub5;7kM4I}l*m4KKBRzGav9?(iKh0Bi`7QFa zowH&odg!oZh{DM>Z(ahP1LwP^@D`s&hg(ySEqZ#~;2~`6GWof+q^mOiA;iVe2K|ia z{RA25+1a+`J~j@7bH>juT^X`$rtGm&t%J7-RND)sE$N^d=#R5ek{iPggjxudCQ0hp z$jrRI&CvTt57I-HMbKLyPJ%IXWEMDb26I=T2@UNW#l85zj1{wmh>(#f!Y^FZ!)Aat z+T8PAE3PG1KUh0Yh=Y02J3^WWV|Q5L5JryeWKagMc?0Jo#V=!n*c|!@0T_aS{KJfO zG+LnD4b3;T8=3U-h|ZYom*{8VZJ~D2$eLNvvIGsZVT9M`9&O88Wa&plAV*ZUlok|= zgd5wx%z?;fH#a!B8d`2+h5oc4tQ;Cqwgg6K<3ggI*m-r(YNB&`!H3*#k*Z-jVozKy z^!9Eb#TkgV0+wBDNM-?<`8Y6h;)dgok`p94iSTU`GeR0=6*U^8VG?rNx#`_DikQVT zeQrpD9)|dVv5bXr0m&A;wcc<%$I=A$*UCx8ylBv$_+P{L^Vpu2v_AzDmeb{;Gpd4Q zH^TqI37o;yr~-BtA^c&pplS?L)$(z9Gz6JICA7x*G4{@S4|z#Rl~X0ZPBIN2`~(i% zC7|Rz+P*T89^>!wxC$%+9R6BE^uffp!7Sxn%OE&J4S#4;GYa{|x$}T?C{CAn51NOv zX@=0UXB@{)4pnv2`D%&mo)*_(R+^q0K{M{j(?%0v4S^tJa zZZ=$jLVya&&dB_w+trVX%bo2py?s}is&|8SQXdn$m>;_wig{a*IC(!+e9&Akb9IQ& z8V-`d;;dhQo_p(Vd^hrf+8&zn6iBqgW3pB|1BO_~QoPO4+rphF{Rn942w0^AA7On) zh2tRxW%@Z@&miYlJK5VAG>Xn`h3e5^1T~0D;gEeVJgWbS^Jm}2ulb%|pIGcZCzfrZ z)a_sxINsn3=MvIVM72U|X+aZt49&!ZKUqHedeX*Ikz_--^be3I^Rm`<-ia2Fm0QQH zVmTnIABzJCUJx>_C9ho9-N29!JJf{p{p!lF-vFtZ@cFjwJ-AirB-yZOVPY@`txl9$ zyGPXU>g;BUg|WMxOK5EcgSf#0m;FqYmG3zW!kfBzSSjpEwne{{pmv%$1ICi7ptk1T zMj|jAhxPX@#-aGC=E}I8ERjr+gGOnUhwMy~eZ6mGRsLsnjI^+8P@JoB&Jv5KBvfDL z>@Tv&2=kG-r$e^H92%G%`;BQYN3}VRw)wT>+sv_1=Y>qTJPO%Kt zTZ(MP%eq{F==_G{MvKAwd9XGE`0Sf=^z1$9DD)S%*XF;q=^n5q?F&EUL$&Kyyuc?f ze#h&R*`W1*>(|`%|GRi?{(ly-xiFTGzvu51Eo>Vg*DgEeVj12Lw;u(z%G5N^vNP`f zUB(F@^Y>Z#Ps#c6w-)|y0n7Q-dI6}9j|3ib^OlS^CPp;#=2Pwu^XF5NhA~lz9Hyri zid3+)toLKY^sx&Y3~W-$q)M4-aZumxCXlNImI}X?XIHdso(_%-uv1d_dZy}s%6yH) zB0PQ`FDqNF_Pnh5Lq90dsU*sqXrLdyw}Z!r(K~AF`U%dZ`~-2zIna8=G(4RtoVQ%9 zd0CTaWwbXf$dw$YNuz;;06w1vgCJH6DUmXFDbl`aIz-XHF6+7}VPZE%9fZw3fNGzO;}8Vx_cVujK+m+Cc++W%68_aJfV z9^(a`cTk5y^{Vdny7`e7mX@T6JZ=U8icBdoPs+-pTh8I7wR?ro?pGCuk>_l_Bgm!ptN0VjsezK3-tB#W3k%kg1efo zH6p$CC$OYIX#!{CZNCkhrRyaJ%}{vu$0yPIW+=}~^wb0O*0RyS>HMD7W6ZcQ|=O#ePQ zq*J<$qd(Jo`mNQMdj#|!!&b_QD3j@A*LBaxyuPo;4k0HD43jK{j5iduIzAj$+5xdE zYOrak`V@L$L2<=L+zk0W58&?a@0-K6ZVQW!X)t15ynPVx8kwb+lT|~70ajwr_&^}Q zz>borrb7ku(qkhV!g9{Fu^n5H5{UH%^sX9G$DY|O(ZqKc!#37==9Kn2A6~fST1=sH z=<^IC z@{YP+wZIA%=2_Oh-Ff!8F1E^H^8y;Nc+{)98}lrUqv!zp3CJ4p_=f}ws#0_y{dVq| zNhUrtITIfi8d3Ti%|vdvKeY;YY1lsJ=(SftL{%1T!%S3^I;Q6iGlkg8i^FWRh5QsA z;>Hso=Bw-IL96L;Yp@*zKo-dz1F%Z3M=@h_>t`MQI}g!E`t58%-9sv>N8nzMGZ4|* znPrcr+XaO?;ta6#XTeP96@!WVQzWysRUv|jM`{O~(7tU+Go^JSilt)TQcR-;__Gj| zpM`!!E#GO2b<$aaKgybTLq$Uij{Fya&e}ftB~JhTaGSUo^5YjAb2K$6m;U#*8kcuh zI@uLumVM;24jV<#=igsl!Zh>B@^nCLE+@NwAO}oJ0vX?k)j6r5PQrL-=U1@SIEISy z?9yvH3~yDX$4*&Ec!eLM2J*mX8mMBSB+%H})n7(LyO6#W6*n)r?;$O5gdD8PIeV9j zt4V&k4ZveJv#VT8Hi^;nK2@m)(2Z;cbf53;=@0*@Ojsm1U9H1ZoDp-k?PnqT_%)|) zI7;)Yym=vIhS>>SS@uOeAN8%p>Uo(!KpXzn5J>ni5o+6%PzMV=2EK>Mje84zZgzmz zw*XSH$ineBBzA+*mw5px23G4r&3=H}HW|_m8Vqw9Nz(Bj{kmKj46#M>;?VpX#-3M_WOz_#BwZDzn^^fQ%hy2-xQ~oepaU!LbhYZjZC8u@_ZarY z6qhZh{`X_w2$7Eu4&KwE&LIholP_Y6eeSEnUJvTWKbN|+OmqheNzZGmu$e@$SMu_! 
[... GIT binary patch literal data omitted: the base85-encoded image payload (presumably the supertable.webp data model diagram referenced by the following doc patches) carries no human-readable content ...]

literal 0
HcmV?d00001

From b5018e8712b22c2c50a68093142651911d435de2 Mon Sep 17 00:00:00 2001
From: gccgdb1234
Date: Thu, 25 Aug 2022 12:59:02 +0800
Subject: [PATCH 63/79] doc: add data model diagram in concept page

---
 docs/en/04-concept/index.md | 2 +-
 docs/zh/04-concept/index.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 1f76a66d1a..28548da8f9 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -158,7 +158,7 @@ Queries can be executed on both a table (subtable) and a STable. For a query on

 In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters.
-To better understand the data model using super table and subtable, please refer to [Meters Data Model Diagram](supertable.webp) +To better understand the data model using super table and subtable, please refer to ![Meters Data Model Diagram](./supertable.webp) ## Database diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md index a433e064a1..57f1e7d602 100644 --- a/docs/zh/04-concept/index.md +++ b/docs/zh/04-concept/index.md @@ -159,7 +159,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。 -为了更好地理解超级与子表的关系,可以参考 [智能电表数据模型示意图](supertable.webp) +为了更好地理解超级与子表的关系,可以参考 ![智能电表数据模型示意图](./supertable.webp) ## 库 (database) From 87410e5a39d3b5659fd689a722d06ead32def805 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 25 Aug 2022 13:02:27 +0800 Subject: [PATCH 64/79] Update index.md --- docs/zh/04-concept/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md index 57f1e7d602..8c1c1bbf17 100644 --- a/docs/zh/04-concept/index.md +++ b/docs/zh/04-concept/index.md @@ -159,7 +159,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。 -为了更好地理解超级与子表的关系,可以参考 ![智能电表数据模型示意图](./supertable.webp) +为了更好地理解超级与子表的关系,可以参考下图 ![智能电表数据模型示意图](./supertable.webp) ## 库 (database) From ec2de0a5969681815c2077228faaee1d2b89a9e1 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 25 Aug 2022 13:03:31 +0800 Subject: [PATCH 65/79] Update index.md --- docs/en/04-concept/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md index 28548da8f9..efb9f1079b 100644 --- a/docs/en/04-concept/index.md +++ b/docs/en/04-concept/index.md @@ -158,7 +158,7 @@ Queries can be executed on both a table (subtable) and a STable. For a query on In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters. -To better understand the data model using super table and subtable, please refer to ![Meters Data Model Diagram](./supertable.webp) +To better understand the data model using super table and subtable, please refer to the diagram below which demonstrates the data model of meters example. 
![Meters Data Model Diagram](./supertable.webp) ## Database From efa13ee71bd13424db16838083d7c5ea9db78cc9 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Thu, 25 Aug 2022 13:04:16 +0800 Subject: [PATCH 66/79] Update index.md --- docs/zh/04-concept/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md index 8c1c1bbf17..9493a62457 100644 --- a/docs/zh/04-concept/index.md +++ b/docs/zh/04-concept/index.md @@ -159,7 +159,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。 -为了更好地理解超级与子表的关系,可以参考下图 ![智能电表数据模型示意图](./supertable.webp) +为了更好地理解超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。 ![智能电表数据模型示意图](./supertable.webp) ## 库 (database) From 667917787e24baee4b6e08e7f081e7e6a47d7a95 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 25 Aug 2022 13:29:13 +0800 Subject: [PATCH 67/79] doc: remove improper output from apt-get remove --- docs/en/13-operation/01-pkg-install.md | 3 --- docs/zh/17-operation/01-pkg-install.md | 3 --- 2 files changed, 6 deletions(-) diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md index caaa920945..b6cc0582bc 100644 --- a/docs/en/13-operation/01-pkg-install.md +++ b/docs/en/13-operation/01-pkg-install.md @@ -24,9 +24,6 @@ $ sudo apt-get remove tdengine Reading package lists... Done Building dependency tree Reading state information... Done -The following packages were automatically installed and are no longer required: - libevent-core-2.1-7 libevent-pthreads-2.1-7 libopts25 sntp -Use 'apt autoremove' to remove them. The following packages will be REMOVED: tdengine 0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded. diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md index 838ec538b0..671dc00cee 100644 --- a/docs/zh/17-operation/01-pkg-install.md +++ b/docs/zh/17-operation/01-pkg-install.md @@ -54,9 +54,6 @@ $ sudo apt-get remove tdengine Reading package lists... Done Building dependency tree Reading state information... Done -The following packages were automatically installed and are no longer required: - libevent-core-2.1-7 libevent-pthreads-2.1-7 libopts25 sntp -Use 'apt autoremove' to remove them. The following packages will be REMOVED: tdengine 0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded. 
From c2b16acc3130e894fef26796185064664fb40942 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 25 Aug 2022 13:44:26 +0800 Subject: [PATCH 68/79] fix: fix code merge issue --- include/common/tmsg.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index b091668d67..ab90bd200a 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -457,6 +457,7 @@ typedef struct { uint64_t suid; uint64_t tuid; int32_t vgId; + int8_t sysInfo; SSchema* pSchemas; } STableMetaRsp; From e11f43ad610afb26f2c5f19f09f2f5de8fa1a657 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 25 Aug 2022 13:51:49 +0800 Subject: [PATCH 69/79] docs: update k8s docs for3.0 (#16401) * docs: update helm doc for 3.0 * docs: update helm parameters list to reflect 3.0 reality --- docs/en/10-deployment/05-helm.md | 131 ++----------------------------- docs/zh/10-deployment/05-helm.md | 130 ++---------------------------- 2 files changed, 11 insertions(+), 250 deletions(-) diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md index 48cd9df32c..302730f1b5 100644 --- a/docs/en/10-deployment/05-helm.md +++ b/docs/en/10-deployment/05-helm.md @@ -170,71 +170,21 @@ taoscfg: # number of replications, for cluster only TAOS_REPLICA: "1" - - # number of days per DB file - # TAOS_DAYS: "10" - - # number of days to keep DB file, default is 10 years. - #TAOS_KEEP: "3650" - - # cache block size (Mbyte) - #TAOS_CACHE: "16" - - # number of cache blocks per vnode - #TAOS_BLOCKS: "6" - - # minimum rows of records in file block - #TAOS_MIN_ROWS: "100" - - # maximum rows of records in file block - #TAOS_MAX_ROWS: "4096" - # - # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core - #TAOS_NUM_OF_THREADS_PER_CORE: "1.0" + # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC + #TAOS_NUM_OF_RPC_THREADS: "2" + # # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data #TAOS_NUM_OF_COMMIT_THREADS: "4" - # - # TAOS_RATIO_OF_QUERY_CORES: - # the proportion of total CPU cores available for query processing - # 2.0: the query threads will be set to double of the CPU cores. - # 1.0: all CPU cores are available for query processing [default]. - # 0.5: only half of the CPU cores are available for query. - # 0.0: only one core available. - #TAOS_RATIO_OF_QUERY_CORES: "1.0" - - # - # TAOS_KEEP_COLUMN_NAME: - # the last_row/first/last aggregator will not change the original column name in the result fields - #TAOS_KEEP_COLUMN_NAME: "0" - - # enable/disable backuping vnode directory when removing vnode - #TAOS_VNODE_BAK: "1" - # enable/disable installation / usage report #TAOS_TELEMETRY_REPORTING: "1" - # enable/disable load balancing - #TAOS_BALANCE: "1" - - # max timer control blocks - #TAOS_MAX_TMR_CTRL: "512" - # time interval of system monitor, seconds #TAOS_MONITOR_INTERVAL: "30" - # number of seconds allowed for a dnode to be offline, for cluster only - #TAOS_OFFLINE_THRESHOLD: "8640000" - - # RPC re-try timer, millisecond - #TAOS_RPC_TIMER: "1000" - - # RPC maximum time for ack, seconds. 
- #TAOS_RPC_MAX_TIME: "600" - # time interval of dnode status reporting to mnode, seconds, for cluster only #TAOS_STATUS_INTERVAL: "1" @@ -245,37 +195,7 @@ taoscfg: #TAOS_MIN_SLIDING_TIME: "10" # minimum time window, milli-second - #TAOS_MIN_INTERVAL_TIME: "10" - - # maximum delay before launching a stream computation, milli-second - #TAOS_MAX_STREAM_COMP_DELAY: "20000" - - # maximum delay before launching a stream computation for the first time, milli-second - #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000" - - # retry delay when a stream computation fails, milli-second - #TAOS_RETRY_STREAM_COMP_DELAY: "10" - - # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 - #TAOS_STREAM_COMP_DELAY_RATIO: "0.1" - - # max number of vgroups per db, 0 means configured automatically - #TAOS_MAX_VGROUPS_PER_DB: "0" - - # max number of tables per vnode - #TAOS_MAX_TABLES_PER_VNODE: "1000000" - - # the number of acknowledgments required for successful data writing - #TAOS_QUORUM: "1" - - # enable/disable compression - #TAOS_COMP: "2" - - # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync - #TAOS_WAL_LEVEL: "1" - - # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away - #TAOS_FSYNC: "3000" + #TAOS_MIN_INTERVAL_TIME: "1" # the compressed rpc message, option: # -1 (no compression) @@ -283,17 +203,8 @@ taoscfg: # > 0 (rpc message body which larger than this value will be compressed) #TAOS_COMPRESS_MSG_SIZE: "-1" - # max length of an SQL - #TAOS_MAX_SQL_LENGTH: "1048576" - - # the maximum number of records allowed for super table time sorting - #TAOS_MAX_NUM_OF_ORDERED_RES: "100000" - # max number of connections allowed in dnode - #TAOS_MAX_SHELL_CONNS: "5000" - - # max number of connections allowed in client - #TAOS_MAX_CONNECTIONS: "5000" + #TAOS_MAX_SHELL_CONNS: "50000" # stop writing logs when the disk size of the log folder is less than this value #TAOS_MINIMAL_LOG_DIR_G_B: "0.1" @@ -313,21 +224,8 @@ taoscfg: # enable/disable system monitor #TAOS_MONITOR: "1" - # enable/disable recording the SQL statements via restful interface - #TAOS_HTTP_ENABLE_RECORD_SQL: "0" - - # number of threads used to process http requests - #TAOS_HTTP_MAX_THREADS: "2" - - # maximum number of rows returned by the restful interface - #TAOS_RESTFUL_ROW_LIMIT: "10240" - - # The following parameter is used to limit the maximum number of lines in log files. - # max number of lines per log filters - # numOfLogLines 10000000 - # enable/disable async log - #TAOS_ASYNC_LOG: "0" + #TAOS_ASYNC_LOG: "1" # # time of keeping log files, days @@ -344,25 +242,8 @@ taoscfg: # debug flag for all log type, take effect when non-zero value\ #TAOS_DEBUG_FLAG: "143" - # enable/disable recording the SQL in taos client - #TAOS_ENABLE_RECORD_SQL: "0" - # generate core file when service crash #TAOS_ENABLE_CORE_FILE: "1" - - # maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden - #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30" - - # enable/disable stream (continuous query) - #TAOS_STREAM: "1" - - # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode - #TAOS_RETRIEVE_BLOCKING_MODEL: "0" - - # the maximum allowed query buffer size in MB during query processing for each data node - # -1 no limit (default) - # 0 no query allowed, queries are disabled - #TAOS_QUERY_BUFFER_SIZE: "-1" ``` ## Scaling Out diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md index 9a723ff62f..34f3a7c5d9 100644 --- a/docs/zh/10-deployment/05-helm.md +++ b/docs/zh/10-deployment/05-helm.md @@ -171,70 +171,19 @@ taoscfg: TAOS_REPLICA: "1" - # number of days per DB file - # TAOS_DAYS: "10" - - # number of days to keep DB file, default is 10 years. - #TAOS_KEEP: "3650" - - # cache block size (Mbyte) - #TAOS_CACHE: "16" - - # number of cache blocks per vnode - #TAOS_BLOCKS: "6" - - # minimum rows of records in file block - #TAOS_MIN_ROWS: "100" - - # maximum rows of records in file block - #TAOS_MAX_ROWS: "4096" - - # - # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core - #TAOS_NUM_OF_THREADS_PER_CORE: "1.0" + # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC + #TAOS_NUM_OF_RPC_THREADS: "2" # # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data #TAOS_NUM_OF_COMMIT_THREADS: "4" - # - # TAOS_RATIO_OF_QUERY_CORES: - # the proportion of total CPU cores available for query processing - # 2.0: the query threads will be set to double of the CPU cores. - # 1.0: all CPU cores are available for query processing [default]. - # 0.5: only half of the CPU cores are available for query. - # 0.0: only one core available. - #TAOS_RATIO_OF_QUERY_CORES: "1.0" - - # - # TAOS_KEEP_COLUMN_NAME: - # the last_row/first/last aggregator will not change the original column name in the result fields - #TAOS_KEEP_COLUMN_NAME: "0" - - # enable/disable backuping vnode directory when removing vnode - #TAOS_VNODE_BAK: "1" - # enable/disable installation / usage report #TAOS_TELEMETRY_REPORTING: "1" - # enable/disable load balancing - #TAOS_BALANCE: "1" - - # max timer control blocks - #TAOS_MAX_TMR_CTRL: "512" - # time interval of system monitor, seconds #TAOS_MONITOR_INTERVAL: "30" - # number of seconds allowed for a dnode to be offline, for cluster only - #TAOS_OFFLINE_THRESHOLD: "8640000" - - # RPC re-try timer, millisecond - #TAOS_RPC_TIMER: "1000" - - # RPC maximum time for ack, seconds. 
- #TAOS_RPC_MAX_TIME: "600" - # time interval of dnode status reporting to mnode, seconds, for cluster only #TAOS_STATUS_INTERVAL: "1" @@ -245,37 +194,7 @@ taoscfg: #TAOS_MIN_SLIDING_TIME: "10" # minimum time window, milli-second - #TAOS_MIN_INTERVAL_TIME: "10" - - # maximum delay before launching a stream computation, milli-second - #TAOS_MAX_STREAM_COMP_DELAY: "20000" - - # maximum delay before launching a stream computation for the first time, milli-second - #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000" - - # retry delay when a stream computation fails, milli-second - #TAOS_RETRY_STREAM_COMP_DELAY: "10" - - # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 - #TAOS_STREAM_COMP_DELAY_RATIO: "0.1" - - # max number of vgroups per db, 0 means configured automatically - #TAOS_MAX_VGROUPS_PER_DB: "0" - - # max number of tables per vnode - #TAOS_MAX_TABLES_PER_VNODE: "1000000" - - # the number of acknowledgments required for successful data writing - #TAOS_QUORUM: "1" - - # enable/disable compression - #TAOS_COMP: "2" - - # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync - #TAOS_WAL_LEVEL: "1" - - # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away - #TAOS_FSYNC: "3000" + #TAOS_MIN_INTERVAL_TIME: "1" # the compressed rpc message, option: # -1 (no compression) @@ -283,17 +202,8 @@ taoscfg: # > 0 (rpc message body which larger than this value will be compressed) #TAOS_COMPRESS_MSG_SIZE: "-1" - # max length of an SQL - #TAOS_MAX_SQL_LENGTH: "1048576" - - # the maximum number of records allowed for super table time sorting - #TAOS_MAX_NUM_OF_ORDERED_RES: "100000" - # max number of connections allowed in dnode - #TAOS_MAX_SHELL_CONNS: "5000" - - # max number of connections allowed in client - #TAOS_MAX_CONNECTIONS: "5000" + #TAOS_MAX_SHELL_CONNS: "50000" # stop writing logs when the disk size of the log folder is less than this value #TAOS_MINIMAL_LOG_DIR_G_B: "0.1" @@ -313,21 +223,8 @@ taoscfg: # enable/disable system monitor #TAOS_MONITOR: "1" - # enable/disable recording the SQL statements via restful interface - #TAOS_HTTP_ENABLE_RECORD_SQL: "0" - - # number of threads used to process http requests - #TAOS_HTTP_MAX_THREADS: "2" - - # maximum number of rows returned by the restful interface - #TAOS_RESTFUL_ROW_LIMIT: "10240" - - # The following parameter is used to limit the maximum number of lines in log files. - # max number of lines per log filters - # numOfLogLines 10000000 - # enable/disable async log - #TAOS_ASYNC_LOG: "0" + #TAOS_ASYNC_LOG: "1" # # time of keeping log files, days @@ -344,25 +241,8 @@ taoscfg: # debug flag for all log type, take effect when non-zero value\ #TAOS_DEBUG_FLAG: "143" - # enable/disable recording the SQL in taos client - #TAOS_ENABLE_RECORD_SQL: "0" - # generate core file when service crash #TAOS_ENABLE_CORE_FILE: "1" - - # maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden - #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30" - - # enable/disable stream (continuous query) - #TAOS_STREAM: "1" - - # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode - #TAOS_RETRIEVE_BLOCKING_MODEL: "0" - - # the maximum allowed query buffer size in MB during query processing for each data node - # -1 no limit (default) - # 0 no query allowed, queries are disabled - #TAOS_QUERY_BUFFER_SIZE: "-1" ``` ## 扩容 From a5d76be1387ea1958d754e206e5c62edcddad986 Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Thu, 25 Aug 2022 14:06:14 +0800 Subject: [PATCH 70/79] docs: correct epoch definition --- docs/zh/12-taos-sql/01-data-type.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md index 628086f5a9..aa0f7ce97d 100644 --- a/docs/zh/12-taos-sql/01-data-type.md +++ b/docs/zh/12-taos-sql/01-data-type.md @@ -11,7 +11,7 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类 - 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128` - 内部函数 now 是客户端的当前时间 - 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 -- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。) +- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应的,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。 - 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。 TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。 From 619f11f02c8b4711a1ae2ecce1154b601d89879e Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 25 Aug 2022 14:10:42 +0800 Subject: [PATCH 71/79] doc: add meters example in concept page --- docs/en/04-concept/index.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md index efb9f1079b..7b0665a5b8 100644 --- a/docs/en/04-concept/index.md +++ b/docs/en/04-concept/index.md @@ -104,15 +104,15 @@ Each row contains the device ID, time stamp, collected metrics (current, voltage ## Metric -Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. +Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. In the meters example, current, voltage and phase are the metrics. ## Label/Tag -Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. 
Unlike the collected metric data, the amount of tag data stored does not change over time. +Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time. In the meters example, `location` and `groupid` are the tags. ## Data Collection Point -Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. +Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the meters example, d1001, d1002, d1003, and d1004 are the data collection points. ## Table @@ -137,7 +137,7 @@ The design of one table for one data collection point will require a huge number STable is a template for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable, in addition to defining the table structure of the metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established. -In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. +In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. In the meters example, we can create a super table named `meters`. 
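As a sketch only (not part of this patch's changes, with the column and tag data types assumed for illustration), the `meters` super table described above could be created with TDengine SQL along these lines:

```sql
-- Hedged sketch: ts/current/voltage/phase and location/groupid come from the
-- smart meters example in this document; the data types are assumptions.
CREATE STABLE meters (
  ts      TIMESTAMP,   -- collection timestamp shared by the metrics of one row
  current FLOAT,       -- metric
  voltage INT,         -- metric
  phase   FLOAT        -- metric
) TAGS (
  location BINARY(64), -- static tag: where the meter is installed
  groupid  INT         -- static tag: which group the meter belongs to
);
```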
## Subtable From 79410403a5475c6f2b5fff3a2c0376d44e74fabf Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 25 Aug 2022 14:15:49 +0800 Subject: [PATCH 72/79] doc: fix one typo --- docs/zh/12-taos-sql/01-data-type.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md index aa0f7ce97d..b8ef050fb7 100644 --- a/docs/zh/12-taos-sql/01-data-type.md +++ b/docs/zh/12-taos-sql/01-data-type.md @@ -11,7 +11,7 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类 - 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128` - 内部函数 now 是客户端的当前时间 - 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 -- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应的,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。 +- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。 - 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。 TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。 From 48c3cffe27856ff10f3fd6c3c2226c3cfad4949b Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Thu, 25 Aug 2022 14:17:28 +0800 Subject: [PATCH 73/79] docs: correct epoch definition in English --- docs/en/12-taos-sql/01-data-type.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md index b830994ac9..876de50f35 100644 --- a/docs/en/12-taos-sql/01-data-type.md +++ b/docs/en/12-taos-sql/01-data-type.md @@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data - The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128` - Internal function `now` can be used to get the current timestamp on the client side - The current timestamp of the client side is applied when `now` is used to insert data -- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT) +- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00. - Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations. Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds. 
From a0ae0c78d901172c273d2c4ffcb9a30544ce7e4e Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 25 Aug 2022 15:16:14 +0800 Subject: [PATCH 74/79] enh: update table meta after auto creating table --- include/common/tmsg.h | 58 ++++++++++++++------------- source/client/src/clientImpl.c | 6 +++ source/common/src/tmsg.c | 29 ++++++++++++++ source/dnode/vnode/src/vnd/vnodeSvr.c | 12 +++--- 4 files changed, 72 insertions(+), 33 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index ab90bd200a..47bd0d0b02 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -268,6 +268,35 @@ STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter); // for debug int32_t tPrintFixedSchemaSubmitReq(SSubmitReq* pReq, STSchema* pSchema); +struct SSchema { + int8_t type; + int8_t flags; + col_id_t colId; + int32_t bytes; + char name[TSDB_COL_NAME_LEN]; +}; + + +typedef struct { + char tbName[TSDB_TABLE_NAME_LEN]; + char stbName[TSDB_TABLE_NAME_LEN]; + char dbFName[TSDB_DB_FNAME_LEN]; + int64_t dbId; + int32_t numOfTags; + int32_t numOfColumns; + int8_t precision; + int8_t tableType; + int32_t sversion; + int32_t tversion; + uint64_t suid; + uint64_t tuid; + int32_t vgId; + int8_t sysInfo; + SSchema* pSchemas; +} STableMetaRsp; + + + typedef struct { int32_t code; int8_t hashMeta; @@ -276,6 +305,7 @@ typedef struct { int32_t numOfRows; int32_t affectedRows; int64_t sver; + STableMetaRsp* pMeta; } SSubmitBlkRsp; typedef struct { @@ -290,6 +320,7 @@ typedef struct { int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp); int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp); +void tFreeSSubmitBlkRsp(void* param); void tFreeSSubmitRsp(SSubmitRsp* pRsp); #define COL_SMA_ON ((int8_t)0x1) @@ -297,13 +328,6 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp); #define COL_SET_NULL ((int8_t)0x10) #define COL_SET_VAL ((int8_t)0x20) #define COL_IS_SYSINFO ((int8_t)0x40) -struct SSchema { - int8_t type; - int8_t flags; - col_id_t colId; - int32_t bytes; - char name[TSDB_COL_NAME_LEN]; -}; #define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0) #define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL))) @@ -442,26 +466,6 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW STSchema* tdGetSTSChemaFromSSChema(SSchema* pSchema, int32_t nCols, int32_t sver); - -typedef struct { - char tbName[TSDB_TABLE_NAME_LEN]; - char stbName[TSDB_TABLE_NAME_LEN]; - char dbFName[TSDB_DB_FNAME_LEN]; - int64_t dbId; - int32_t numOfTags; - int32_t numOfColumns; - int8_t precision; - int8_t tableType; - int32_t sversion; - int32_t tversion; - uint64_t suid; - uint64_t tuid; - int32_t vgId; - int8_t sysInfo; - SSchema* pSchemas; -} STableMetaRsp; - - typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t igExists; diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 640cd8ed4d..0480c49ca1 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -723,6 +723,12 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog for (int32_t i = 0; i < pRsp->nBlocks; ++i) { SSubmitBlkRsp* blk = pRsp->pBlocks + i; + if (blk->pMeta) { + handleCreateTbExecRes(blk->pMeta, pCatalog); + tFreeSTableMetaRsp(blk->pMeta); + taosMemoryFreeClear(blk->pMeta); + } + if (NULL == blk->tblFName || 0 == blk->tblFName[0]) { continue; } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index e86eb6ff62..058f26d145 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5324,6 
+5324,10 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1; if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1; if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1; + if (tEncodeI32(pEncoder, pBlock->pMeta ? 1 : 0) < 0) return -1; + if (pBlock->pMeta) { + if (tEncodeSTableMetaRsp(pEncoder, pBlock->pMeta) < 0) return -1; + } tEndEncode(pEncoder); return 0; @@ -5341,6 +5345,16 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) { if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1; if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1; if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1; + + int32_t meta = 0; + if (tDecodeI32(pDecoder, &meta) < 0) return -1; + if (meta) { + pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pBlock->pMeta) return -1; + if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1; + } else { + pBlock->pMeta = NULL; + } tEndDecode(pDecoder); return 0; @@ -5379,6 +5393,21 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) { return 0; } +void tFreeSSubmitBlkRsp(void* param) { + if (NULL == param) { + return; + } + + SSubmitBlkRsp* pRsp = (SSubmitBlkRsp*)param; + + taosMemoryFree(pRsp->tblFName); + if (pRsp->pMeta) { + taosMemoryFree(pRsp->pMeta->pSchemas); + taosMemoryFree(pRsp->pMeta); + } +} + + void tFreeSSubmitRsp(SSubmitRsp *pRsp) { if (NULL == pRsp) return; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 51a1347383..85feecff1a 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -867,7 +867,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq goto _exit; } - if (metaCreateTable(pVnode->pMeta, version, &createTbReq, NULL) < 0) { + if (metaCreateTable(pVnode->pMeta, version, &createTbReq, &submitBlkRsp.pMeta) < 0) { if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { submitBlkRsp.code = terrno; pRsp->code = terrno; @@ -875,6 +875,10 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq taosArrayDestroy(createTbReq.ctb.tagName); goto _exit; } + } else { + if (NULL != submitBlkRsp.pMeta) { + vnodeUpdateMetaRsp(pVnode, submitBlkRsp.pMeta); + } } taosArrayPush(newTbUids, &createTbReq.uid); @@ -918,11 +922,7 @@ _exit: tEncodeSSubmitRsp(&encoder, &submitRsp); tEncoderClear(&encoder); - for (int32_t i = 0; i < taosArrayGetSize(submitRsp.pArray); i++) { - taosMemoryFree(((SSubmitBlkRsp *)taosArrayGet(submitRsp.pArray, i))[0].tblFName); - } - - taosArrayDestroy(submitRsp.pArray); + taosArrayDestroyEx(submitRsp.pArray, tFreeSSubmitBlkRsp); // TODO: the partial success scenario and the error case // => If partial success, extract the success submitted rows and reconstruct a new submit msg, and push to level From c2eb45fad7da9ec4fa74630cd3f8e0e64084bafc Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Thu, 25 Aug 2022 15:16:51 +0800 Subject: [PATCH 75/79] Update index.md --- docs/en/04-concept/index.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md index 7b0665a5b8..5a9c55fdd6 100644 --- a/docs/en/04-concept/index.md +++ b/docs/en/04-concept/index.md @@ -104,7 +104,7 @@ Each row contains the device ID, time stamp, collected metrics (current, voltage ## Metric -Metric refers to the physical quantity collected by sensors, equipment or 
other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. In the meters example, current, voltage and phase are the metrics. +Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. In the smart meters example, current, voltage and phase are the metrics. ## Label/Tag @@ -112,7 +112,7 @@ Label/Tag refers to the static properties of sensors, equipment or other types o ## Data Collection Point -Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the meters example, d1001, d1002, d1003, and d1004 are the data collection points. +Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points. ## Table @@ -137,7 +137,7 @@ The design of one table for one data collection point will require a huge number STable is a template for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable, in addition to defining the table structure of the metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established. -In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. In the meters example, we can create a super table named `meters`. 
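Building on the super table sketched earlier (again as an illustration only — the tag values and measurements below are made up, and the schema is the assumed one, not something defined by this patch), each individual meter, i.e. each data collection point, would then get its own subtable of `meters`, and a query on the super table aggregates across the subtables that match the tag filter:

```sql
-- Hedged sketch: assumes the meters super table from the previous sketch exists.
CREATE TABLE d1001 USING meters TAGS ('SanFrancisco', 2);  -- one subtable per data collection point

INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31);           -- one row: ts, current, voltage, phase

-- Querying the super table first filters subtables by tag, then aggregates their rows.
SELECT AVG(voltage) FROM meters WHERE groupid = 2;
```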
+In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. In the smart meters example, we can create a super table named `meters`. ## Subtable @@ -156,9 +156,9 @@ The relationship between a STable and the subtables created based on this STable Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type. -In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters. +In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters. -To better understand the data model using super table and subtable, please refer to the diagram below which demonstrates the data model of meters example. ![Meters Data Model Diagram](./supertable.webp) +To better understand the data model using metri, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example. ![Meters Data Model Diagram](./supertable.webp) ## Database From 91659c8c509df1783bae26f28ce54d3fc3b38485 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 25 Aug 2022 15:17:04 +0800 Subject: [PATCH 76/79] refactor: make uptimeInterval can be configured --- source/common/src/tglobal.c | 2 + source/dnode/mnode/impl/src/mndMain.c | 2 +- tests/script/tmp/monitor.sim | 3 +- tests/script/tsim/user/privilege_sysinfo.sim | 130 ++++++++++++++++++- 4 files changed, 130 insertions(+), 7 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index bb2729c776..ee9d751555 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -420,6 +420,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1; GRANT_CFG_ADD; @@ -567,6 +568,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32; + tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32; tsStartUdfd = cfgGetItem(pCfg, "udf")->bval; diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 65a539bc90..2221718023 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -132,7 +132,7 @@ static void *mndThreadFp(void *param) { 
mndCalMqRebalance(pMnode); } - if (lastTime % (tsTelemInterval * 10) == 1) { + if (lastTime % (tsTelemInterval * 10) == ((tsTelemInterval - 1) * 10)) { mndPullupTelem(pMnode); } diff --git a/tests/script/tmp/monitor.sim b/tests/script/tmp/monitor.sim index c0c1da567c..b410e1b6ad 100644 --- a/tests/script/tmp/monitor.sim +++ b/tests/script/tmp/monitor.sim @@ -4,6 +4,7 @@ system sh/cfg.sh -n dnode1 -c monitorfqdn -v localhost system sh/cfg.sh -n dnode1 -c monitorport -v 80 system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 system sh/cfg.sh -n dnode1 -c monitorComp -v 1 +system sh/cfg.sh -n dnode1 -c uptimeInterval -v 3 #system sh/cfg.sh -n dnode1 -c supportVnodes -v 128 #system sh/cfg.sh -n dnode1 -c telemetryReporting -v 1 @@ -14,7 +15,7 @@ system sh/cfg.sh -n dnode1 -c monitorComp -v 1 system sh/exec.sh -n dnode1 -s start sql connect -print =============== select * from information_schema.ins_dnodes +print =============== create database sql create database db vgroups 2; sql use db; sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"; diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim index 5a892cb2c4..3008599427 100644 --- a/tests/script/tsim/user/privilege_sysinfo.sim +++ b/tests/script/tsim/user/privilege_sysinfo.sim @@ -8,7 +8,20 @@ sql create user sysinfo0 pass 'taosdata' sql create user sysinfo1 pass 'taosdata' sql alter user sysinfo0 sysinfo 0 sql alter user sysinfo1 sysinfo 1 + sql create database db +sql use db +sql create table db.stb (ts timestamp, i int) tags (t int) +sql create table db.ctb using db.stb tags (1) +sql create table db.ntb (ts timestamp, i int) +sql insert into db.ctb values (now, 1); +sql insert into db.ntb values (now, 1); +sql select * from db.stb +sql select * from db.ctb +sql select * from db.ntb + +sql create database d2 +sql GRANT all ON d2.* to sysinfo0; print user sysinfo0 login sql close @@ -17,11 +30,31 @@ sql connect sysinfo0 print =============== check oper sql_error create user u1 pass 'u1' sql_error drop user sysinfo1 -sql_error alter user sysinfo1 pass '1' sql_error alter user sysinfo0 pass '1' +sql_error alter user sysinfo0 enable 0 +sql_error alter user sysinfo0 enable 1 +sql_error alter user sysinfo1 pass '1' +sql_error alter user sysinfo1 enable 1 +sql_error alter user sysinfo1 enable 1 +sql_error GRANT read ON db.* to sysinfo0; +sql_error GRANT read ON *.* to sysinfo0; +sql_error REVOKE read ON db.* from sysinfo0; +sql_error REVOKE read ON *.* from sysinfo0; +sql_error GRANT write ON db.* to sysinfo0; +sql_error GRANT write ON *.* to sysinfo0; +sql_error REVOKE write ON db.* from sysinfo0; +sql_error REVOKE write ON *.* from sysinfo0; +sql_error REVOKE write ON *.* from sysinfo0; sql_error create dnode $hostname port 7200 sql_error drop dnode 1 +sql_error alter dnode 1 'debugFlag 135' +sql_error alter dnode 1 'dDebugFlag 131' +sql_error alter dnode 1 'resetlog' +sql_error alter dnode 1 'monitor' '1' +sql_error alter dnode 1 'monitor' '0' +sql_error alter dnode 1 'monitor 1' +sql_error alter dnode 1 'monitor 0' sql_error create qnode on dnode 1 sql_error drop qnode on dnode 1 @@ -44,20 +77,107 @@ sql_error create database d1 sql_error drop database db sql_error use db sql_error alter database db replica 1; +sql_error alter database db keep 21 sql_error show db.vgroups -sql select * from information_schema.ins_stables where db_name = 'db' -sql select * from information_schema.ins_tables where db_name = 'db' + +sql_error create table db.stb1 
(ts timestamp, i int) tags (t int) +sql_error create table db.ctb1 using db.stb1 tags (1) +sql_error create table db.ntb1 (ts timestamp, i int) +sql_error insert into db.ctb values (now, 1); +sql_error insert into db.ntb values (now, 1); +sql_error select * from db.stb +sql_error select * from db.ctb +sql_error select * from db.ntb + +sql use d2 +sql create table d2.stb2 (ts timestamp, i int) tags (t int) +sql create table d2.ctb2 using d2.stb2 tags (1) +sql create table d2.ntb2 (ts timestamp, i int) +sql insert into d2.ctb2 values (now, 1); +sql insert into d2.ntb2 values (now, 1); +sql select * from d2.stb2 +sql select * from d2.ctb2 +sql select * from d2.ntb2 print =============== check show -sql select * from information_schema.ins_users +sql_error show users sql_error show cluster sql_error select * from information_schema.ins_dnodes sql_error select * from information_schema.ins_mnodes sql_error show snodes sql_error select * from information_schema.ins_qnodes +sql_error show dnodes +sql_error show snodes +sql_error show qnodes +sql_error show mnodes sql_error show bnodes +sql_error show db.vgroups +sql_error show db.stables +sql_error show db.tables +sql_error show indexes from stb from db +sql show databases +sql_error show d2.vgroups +sql show d2.stables +sql show d2.tables +sql show indexes from stb2 from d2 +#sql_error show create database db +sql_error show create table db.stb; +sql_error show create table db.ctb; +sql_error show create table db.ntb; +sql show streams +sql show consumers +sql show topics +sql show subscriptions +sql show functions sql_error show grants +sql show queries +sql show connections +sql show apps +sql_error show transactions +#sql_error show create database d2 +sql show create table d2.stb2; +sql show create table d2.ctb2; +sql show create table d2.ntb2; +sql_error show variables; +sql show local variables; sql_error show dnode 1 variables; sql_error show variables; -system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print =============== check information_schema +sql show databases +if $rows != 3 then + return -1 +endi + +sql use information_schema; +sql_error select * from information_schema.ins_dnodes +sql_error select * from information_schema.ins_mnodes +sql_error select * from information_schema.ins_modules +sql_error select * from information_schema.ins_qnodes +sql_error select * from information_schema.ins_cluster +sql select * from information_schema.ins_databases +sql select * from information_schema.ins_functions +sql select * from information_schema.ins_indexes +sql select * from information_schema.ins_stables +sql select * from information_schema.ins_tables +sql select * from information_schema.ins_tags +sql select * from information_schema.ins_users +sql_error select * from information_schema.ins_grants +sql_error select * from information_schema.ins_vgroups +sql_error select * from information_schema.ins_configs +sql_error select * from information_schema.ins_dnode_variables + +print =============== check performance_schema +sql use performance_schema; +sql select * from performance_schema.perf_connections +sql select * from performance_schema.perf_queries +sql select * from performance_schema.perf_topics +sql select * from performance_schema.perf_consumers +sql select * from performance_schema.perf_subscriptions +#sql_error select * from performance_schema.perf_trans +#sql_error select * from performance_schema.perf_smas +#sql_error select * from information_schema.perf_streams +#sql_error select * from information_schema.perf_apps + 
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT From d558793ab6c0a1253253e7749df2c2473cde4aee Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 25 Aug 2022 15:23:20 +0800 Subject: [PATCH 77/79] doc: add description field for files without description --- docs/zh/02-intro.md | 1 + docs/zh/04-concept/index.md | 2 ++ docs/zh/05-get-started/01-docker.md | 1 + docs/zh/05-get-started/03-package.md | 1 + docs/zh/07-develop/02-model/index.mdx | 2 ++ docs/zh/07-develop/03-insert-data/index.md | 2 ++ docs/zh/07-develop/04-query-data/index.mdx | 1 + docs/zh/07-develop/index.md | 2 ++ docs/zh/08-connector/_01-error-code.md | 1 + docs/zh/10-deployment/01-deploy.md | 1 + docs/zh/10-deployment/03-k8s.md | 1 + docs/zh/10-deployment/05-helm.md | 1 + docs/zh/10-deployment/index.md | 2 ++ docs/zh/12-taos-sql/03-table.md | 2 ++ docs/zh/12-taos-sql/04-stable.md | 1 + docs/zh/12-taos-sql/05-insert.md | 1 + docs/zh/12-taos-sql/06-select.md | 1 + docs/zh/12-taos-sql/10-function.md | 1 + docs/zh/12-taos-sql/12-distinguished.md | 1 + docs/zh/12-taos-sql/13-tmq.md | 1 + docs/zh/12-taos-sql/14-stream.md | 1 + docs/zh/12-taos-sql/16-operators.md | 1 + docs/zh/12-taos-sql/17-json.md | 1 + docs/zh/12-taos-sql/18-escape.md | 2 ++ docs/zh/12-taos-sql/19-limit.md | 1 + docs/zh/12-taos-sql/20-keywords.md | 1 + docs/zh/12-taos-sql/21-node.md | 1 + docs/zh/12-taos-sql/22-meta.md | 1 + docs/zh/12-taos-sql/23-perf.md | 1 + docs/zh/12-taos-sql/24-show.md | 1 + docs/zh/12-taos-sql/25-grant.md | 1 + docs/zh/12-taos-sql/26-udf.md | 1 + docs/zh/12-taos-sql/27-index.md | 1 + docs/zh/12-taos-sql/28-recovery.md | 1 + docs/zh/14-reference/07-tdinsight/index.mdx | 3 ++- docs/zh/14-reference/index.md | 1 + docs/zh/17-operation/02-planning.mdx | 1 + docs/zh/17-operation/03-tolerance.md | 2 ++ docs/zh/17-operation/07-import.md | 1 + docs/zh/17-operation/08-export.md | 1 + docs/zh/17-operation/10-monitor.md | 1 + docs/zh/17-operation/17-diagnose.md | 1 + docs/zh/20-third-party/01-grafana.mdx | 1 + docs/zh/20-third-party/02-prometheus.md | 1 + docs/zh/20-third-party/03-telegraf.md | 1 + docs/zh/20-third-party/05-collectd.md | 1 + docs/zh/20-third-party/06-statsd.md | 1 + docs/zh/20-third-party/07-icinga2.md | 1 + docs/zh/20-third-party/08-tcollector.md | 1 + docs/zh/20-third-party/09-emq-broker.md | 1 + docs/zh/20-third-party/10-hive-mq-broker.md | 1 + docs/zh/20-third-party/11-kafka.md | 3 ++- docs/zh/21-tdinternal/01-arch.md | 1 + docs/zh/21-tdinternal/03-high-availability.md | 1 + docs/zh/21-tdinternal/05-load-balance.md | 3 ++- docs/zh/21-tdinternal/index.md | 1 + docs/zh/25-application/01-telegraf.md | 3 ++- docs/zh/25-application/02-collectd.md | 3 ++- docs/zh/25-application/index.md | 1 + docs/zh/27-train-faq/01-faq.md | 1 + docs/zh/27-train-faq/index.md | 1 + docs/zh/28-releases/01-tdengine.md | 1 + docs/zh/28-releases/02-tools.md | 1 + 63 files changed, 76 insertions(+), 5 deletions(-) diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index f726b4ea92..f6779b8776 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -1,5 +1,6 @@ --- title: 产品简介 +description: 简要介绍 TDengine 的主要功能 toc_max_heading_level: 2 --- diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md index 9493a62457..89d3df9c97 100644 --- a/docs/zh/04-concept/index.md +++ b/docs/zh/04-concept/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 基本概念 title: 数据模型和基本概念 +description: TDengine 的数据模型和基本概念 --- 为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 
其采集的数据类似如下的表格: diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index f0f09d4c7e..e2be419517 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -1,6 +1,7 @@ --- sidebar_label: Docker title: 通过 Docker 快速体验 TDengine +description: 使用 Docker 快速体验 TDengine 的高效写入和查询 --- 本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index a1c1802d77..3e0fb056a5 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -1,6 +1,7 @@ --- sidebar_label: 安装包 title: 使用安装包立即开始 +description: 使用安装包快速体验 TDengine --- import Tabs from "@theme/Tabs"; diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx index 1609eb5362..634c8a98d4 100644 --- a/docs/zh/07-develop/02-model/index.mdx +++ b/docs/zh/07-develop/02-model/index.mdx @@ -1,5 +1,7 @@ --- +sidebar_label: 数据建模 title: TDengine 数据建模 +description: TDengine 中如何建立数据模型 --- TDengine 采用类关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 diff --git a/docs/zh/07-develop/03-insert-data/index.md b/docs/zh/07-develop/03-insert-data/index.md index 55a28e4a8b..f1e5ada4df 100644 --- a/docs/zh/07-develop/03-insert-data/index.md +++ b/docs/zh/07-develop/03-insert-data/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 写入数据 title: 写入数据 +description: TDengine 的各种写入方式 --- TDengine 支持多种写入协议,包括 SQL,InfluxDB Line 协议, OpenTSDB Telnet 协议,OpenTSDB JSON 格式协议。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。同时,TDengine 支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。InfluxDB Line 协议、OpenTSDB Telnet 协议和 OpenTSDB JSON 格式协议是 TDengine 支持的三种无模式写入协议。使用无模式方式写入无需提前创建超级表和子表,并且引擎能自适用数据对表结构做调整。 diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx index 2631d147a5..c083c30c2c 100644 --- a/docs/zh/07-develop/04-query-data/index.mdx +++ b/docs/zh/07-develop/04-query-data/index.mdx @@ -1,4 +1,5 @@ --- +sidebar_label: 查询数据 title: 查询数据 description: "主要查询功能,通过连接器执行同步查询和异步查询" --- diff --git a/docs/zh/07-develop/index.md b/docs/zh/07-develop/index.md index 20c0170844..efaffaea71 100644 --- a/docs/zh/07-develop/index.md +++ b/docs/zh/07-develop/index.md @@ -1,5 +1,7 @@ --- title: 开发指南 +sidebar_label: 开发指南 +description: 让开发者能够快速上手的指南 --- 开发一个应用,如果你准备采用TDengine作为时序数据处理的工具,那么有如下几个事情要做: diff --git a/docs/zh/08-connector/_01-error-code.md b/docs/zh/08-connector/_01-error-code.md index 53e006e108..3111d4bbf8 100644 --- a/docs/zh/08-connector/_01-error-code.md +++ b/docs/zh/08-connector/_01-error-code.md @@ -1,6 +1,7 @@ --- sidebar_label: 错误码 title: TDengine C/C++ 连接器错误码 +description: C/C++ 连接器的错误码列表和详细说明 --- 本文中详细列举了在使用 TDengine C/C++ 连接器时客户端可能得到的错误码以及所要采取的相应动作。其它语言的连接器在使用原生连接方式时也会所得到的返回码返回给连接器的调用者。 diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md index 22a9c2ff8e..8d8a2eb6d8 100644 --- a/docs/zh/10-deployment/01-deploy.md +++ b/docs/zh/10-deployment/01-deploy.md @@ -1,6 +1,7 @@ --- sidebar_label: 手动部署 title: 集群部署和管理 +description: 使用命令行工具手动部署 TDengine 集群 --- ## 准备工作 diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md index 396b834324..5d512700b6 100644 --- a/docs/zh/10-deployment/03-k8s.md +++ b/docs/zh/10-deployment/03-k8s.md @@ -1,6 +1,7 @@ --- sidebar_label: Kubernetes title: 在 Kubernetes 上部署 
TDengine 集群 +description: 利用 Kubernetes 部署 TDengine 集群的详细指南 --- 作为面向云原生架构设计的时序数据库,TDengine 支持 Kubernetes 部署。这里介绍如何使用 YAML 文件一步一步从头创建一个 TDengine 集群,并重点介绍 Kubernetes 环境下 TDengine 的常用操作。 diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md index 34f3a7c5d9..9a3b21f092 100644 --- a/docs/zh/10-deployment/05-helm.md +++ b/docs/zh/10-deployment/05-helm.md @@ -1,6 +1,7 @@ --- sidebar_label: Helm title: 使用 Helm 部署 TDengine 集群 +description: 使用 Helm 部署 TDengine 集群的详细指南 --- Helm 是 Kubernetes 的包管理器,上一节使用 Kubernets 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。 diff --git a/docs/zh/10-deployment/index.md b/docs/zh/10-deployment/index.md index 96ac7b176d..4ff1add779 100644 --- a/docs/zh/10-deployment/index.md +++ b/docs/zh/10-deployment/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 部署集群 title: 部署集群 +description: 部署 TDengine 集群的多种方式 --- TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证供系统的高可用。TDengine的集群功能完全开源。 diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md index 0e104bb7b6..a93b010c4c 100644 --- a/docs/zh/12-taos-sql/03-table.md +++ b/docs/zh/12-taos-sql/03-table.md @@ -1,5 +1,7 @@ --- title: 表管理 +sidebar_label: 表 +description: 对表的各种管理操作 --- ## 创建表 diff --git a/docs/zh/12-taos-sql/04-stable.md b/docs/zh/12-taos-sql/04-stable.md index 59d9657694..450ff07fd8 100644 --- a/docs/zh/12-taos-sql/04-stable.md +++ b/docs/zh/12-taos-sql/04-stable.md @@ -1,6 +1,7 @@ --- sidebar_label: 超级表管理 title: 超级表 STable 管理 +description: 对超级表的各种管理操作 --- ## 创建超级表 diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md index c91e70c481..59af9c55ed 100644 --- a/docs/zh/12-taos-sql/05-insert.md +++ b/docs/zh/12-taos-sql/05-insert.md @@ -1,6 +1,7 @@ --- sidebar_label: 数据写入 title: 数据写入 +description: 写入数据的详细语法 --- ## 写入语法 diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 5312d7d2f3..0c305231e0 100644 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -1,6 +1,7 @@ --- sidebar_label: 数据查询 title: 数据查询 +description: 查询数据的详细语法 --- ## 查询语法 diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index e99d915101..af31a1d4bd 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -1,6 +1,7 @@ --- sidebar_label: 函数 title: 函数 +description: TDengine 支持的函数列表 toc_max_heading_level: 4 --- diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md index 2dad49ece9..b9e06033d6 100644 --- a/docs/zh/12-taos-sql/12-distinguished.md +++ b/docs/zh/12-taos-sql/12-distinguished.md @@ -1,6 +1,7 @@ --- sidebar_label: 时序数据特色查询 title: 时序数据特色查询 +description: TDengine 提供的时序数据特有的查询功能 --- TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特定进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。 diff --git a/docs/zh/12-taos-sql/13-tmq.md b/docs/zh/12-taos-sql/13-tmq.md index b05d2bf680..571300ad8c 100644 --- a/docs/zh/12-taos-sql/13-tmq.md +++ b/docs/zh/12-taos-sql/13-tmq.md @@ -1,6 +1,7 @@ --- sidebar_label: 数据订阅 title: 数据订阅 +description: TDengine 消息队列提供的数据订阅功能 --- TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。 diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md index 28f52be59a..70b062a6ca 100644 --- a/docs/zh/12-taos-sql/14-stream.md +++ b/docs/zh/12-taos-sql/14-stream.md @@ -1,6 +1,7 @@ --- sidebar_label: 流式计算 title: 流式计算 +description: 流式计算的相关 SQL 的详细语法 --- diff --git a/docs/zh/12-taos-sql/16-operators.md 
b/docs/zh/12-taos-sql/16-operators.md index 22b78455fb..48e9991799 100644 --- a/docs/zh/12-taos-sql/16-operators.md +++ b/docs/zh/12-taos-sql/16-operators.md @@ -1,6 +1,7 @@ --- sidebar_label: 运算符 title: 运算符 +description: TDengine 支持的所有运算符 --- ## 算术运算符 diff --git a/docs/zh/12-taos-sql/17-json.md b/docs/zh/12-taos-sql/17-json.md index 4a4a8cca73..4cbd8eef36 100644 --- a/docs/zh/12-taos-sql/17-json.md +++ b/docs/zh/12-taos-sql/17-json.md @@ -1,6 +1,7 @@ --- sidebar_label: JSON 类型使用说明 title: JSON 类型使用说明 +description: 对 JSON 类型如何使用的详细说明 --- diff --git a/docs/zh/12-taos-sql/18-escape.md b/docs/zh/12-taos-sql/18-escape.md index d478340599..7e543743a3 100644 --- a/docs/zh/12-taos-sql/18-escape.md +++ b/docs/zh/12-taos-sql/18-escape.md @@ -1,5 +1,7 @@ --- title: 转义字符说明 +sidebar_label: 转义字符 +description: TDengine 中使用转义字符的详细规则 --- ## 转义字符表 diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md index ff552fc977..473bb29c1c 100644 --- a/docs/zh/12-taos-sql/19-limit.md +++ b/docs/zh/12-taos-sql/19-limit.md @@ -1,6 +1,7 @@ --- sidebar_label: 命名与边界限制 title: 命名与边界限制 +description: 合法字符集和命名中的限制规则 --- ## 名称命名规则 diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index cac29d7863..047c6b08c9 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -1,6 +1,7 @@ --- sidebar_label: 保留关键字 title: TDengine 保留关键字 +description: TDengine 保留关键字的详细列表 --- ## 保留关键字 diff --git a/docs/zh/12-taos-sql/21-node.md b/docs/zh/12-taos-sql/21-node.md index 4816daf420..d47dc8198f 100644 --- a/docs/zh/12-taos-sql/21-node.md +++ b/docs/zh/12-taos-sql/21-node.md @@ -1,6 +1,7 @@ --- sidebar_label: 集群管理 title: 集群管理 +description: 管理集群的 SQL 命令的详细解析 --- 组成 TDengine 集群的物理实体是 dnode (data node 的缩写),它是一个运行在操作系统之上的进程。在 dnode 中可以建立负责时序数据存储的 vnode (virtual node),在多节点集群环境下当某个数据库的 replica 为 3 时,该数据库中的每个 vgroup 由 3 个 vnode 组成;当数据库的 replica 为 1 时,该数据库中的每个 vgroup 由 1 个 vnode 组成。如果要想配置某个数据库为多副本,则集群中的 dnode 数量至少为 3。在 dnode 还可以创建 mnode (management node),单个集群中最多可以创建三个 mnode。在 TDengine 3.0.0.0 中为了支持存算分离,引入了一种新的逻辑节点 qnode (query node),qnode 和 vnode 既可以共存在一个 dnode 中,也可以完全分离在不同的 dnode 上。 diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md index 8139b2fc55..e9cda45b0f 100644 --- a/docs/zh/12-taos-sql/22-meta.md +++ b/docs/zh/12-taos-sql/22-meta.md @@ -1,6 +1,7 @@ --- sidebar_label: 元数据 title: 存储元数据的 Information_Schema 数据库 +description: Information_Schema 数据库中存储了系统中所有的元数据信息 --- TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... 
FROM INFORMATION_SCHEMA.tablename 具有以下优点: diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md index ac852ee150..e6ff4960a7 100644 --- a/docs/zh/12-taos-sql/23-perf.md +++ b/docs/zh/12-taos-sql/23-perf.md @@ -1,6 +1,7 @@ --- sidebar_label: 统计数据 title: 存储统计数据的 Performance_Schema 数据库 +description: Performance_Schema 数据库中存储了系统中的各种统计信息 --- TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其中存储了与性能有关的统计数据。本节详细介绍其中的表和表结构。 diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 75efd5f514..14b51fb4c1 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -1,6 +1,7 @@ --- sidebar_label: SHOW 命令 title: 使用 SHOW 命令查看系统元数据 +description: SHOW 命令的完整列表 --- SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。 diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md index c41a3fcfc9..6f7024d32e 100644 --- a/docs/zh/12-taos-sql/25-grant.md +++ b/docs/zh/12-taos-sql/25-grant.md @@ -1,6 +1,7 @@ --- sidebar_label: 权限管理 title: 权限管理 +description: 企业版中才具有的权限管理功能 --- 本节讲述如何在 TDengine 中进行权限管理的相关操作。 diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md index 7ddcad298b..764fde6e1f 100644 --- a/docs/zh/12-taos-sql/26-udf.md +++ b/docs/zh/12-taos-sql/26-udf.md @@ -1,6 +1,7 @@ --- sidebar_label: 自定义函数 title: 用户自定义函数 +description: 使用 UDF 的详细指南 --- 除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。 diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md index 2c0907723e..f88c6cf4ff 100644 --- a/docs/zh/12-taos-sql/27-index.md +++ b/docs/zh/12-taos-sql/27-index.md @@ -1,6 +1,7 @@ --- sidebar_label: 索引 title: 使用索引 +description: 索引功能的使用细节 --- TDengine 从 3.0.0.0 版本开始引入了索引功能,支持 SMA 索引和 FULLTEXT 索引。 diff --git a/docs/zh/12-taos-sql/28-recovery.md b/docs/zh/12-taos-sql/28-recovery.md index 72b220b8ff..582c373907 100644 --- a/docs/zh/12-taos-sql/28-recovery.md +++ b/docs/zh/12-taos-sql/28-recovery.md @@ -1,6 +1,7 @@ --- sidebar_label: 异常恢复 title: 异常恢复 +description: 如何终止出现问题的连接、查询和事务以使系统恢复正常 --- 在一个复杂的应用场景中,连接和查询任务等有可能进入一种错误状态或者耗时过长迟迟无法结束,此时需要有能够终止这些连接或任务的方法。 diff --git a/docs/zh/14-reference/07-tdinsight/index.mdx b/docs/zh/14-reference/07-tdinsight/index.mdx index 9548922e65..ecd6362143 100644 --- a/docs/zh/14-reference/07-tdinsight/index.mdx +++ b/docs/zh/14-reference/07-tdinsight/index.mdx @@ -1,6 +1,7 @@ --- -title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案 +title: TDinsight sidebar_label: TDinsight +description: 基于Grafana的TDengine零依赖监控解决方案 --- TDinsight 是使用监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。 diff --git a/docs/zh/14-reference/index.md b/docs/zh/14-reference/index.md index e9c0c4fe23..9d0a44af57 100644 --- a/docs/zh/14-reference/index.md +++ b/docs/zh/14-reference/index.md @@ -1,5 +1,6 @@ --- title: 参考手册 +description: TDengine 中的各种组件的详细说明 --- 参考手册是对 TDengine 本身、 TDengine 各语言连接器及自带的工具最详细的介绍。 diff --git a/docs/zh/17-operation/02-planning.mdx b/docs/zh/17-operation/02-planning.mdx index 0d63c4eaf3..28e3f54020 100644 --- a/docs/zh/17-operation/02-planning.mdx +++ b/docs/zh/17-operation/02-planning.mdx @@ -1,6 +1,7 @@ --- sidebar_label: 容量规划 title: 容量规划 +description: 如何规划一个 TDengine 集群所需的物理资源 --- 使用 TDengine 来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU 以及硬盘空间。 diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md index 1ce485b042..79cf10c39a 100644 --- a/docs/zh/17-operation/03-tolerance.md +++ b/docs/zh/17-operation/03-tolerance.md @@ -1,5 +1,7 @@ --- title: 容错和灾备 +sidebar_label: 容错和灾备 +description: TDengine 
的容错和灾备功能 --- ## 容错 diff --git a/docs/zh/17-operation/07-import.md b/docs/zh/17-operation/07-import.md index 7dee05720d..17945be595 100644 --- a/docs/zh/17-operation/07-import.md +++ b/docs/zh/17-operation/07-import.md @@ -1,5 +1,6 @@ --- title: 数据导入 +description: 如何导入外部数据到 TDengine --- TDengine 提供多种方便的数据导入功能,一种按脚本文件导入,一种按数据文件导入,一种是 taosdump 工具导入本身导出的文件。 diff --git a/docs/zh/17-operation/08-export.md b/docs/zh/17-operation/08-export.md index 042ecc7ba2..ecc3b2f110 100644 --- a/docs/zh/17-operation/08-export.md +++ b/docs/zh/17-operation/08-export.md @@ -1,5 +1,6 @@ --- title: 数据导出 +description: 如何导出 TDengine 中的数据 --- 为方便数据导出,TDengine 提供了两种导出方式,分别是按表导出和用 taosdump 导出。 diff --git a/docs/zh/17-operation/10-monitor.md b/docs/zh/17-operation/10-monitor.md index 9f0f06fde2..e936f35dca 100644 --- a/docs/zh/17-operation/10-monitor.md +++ b/docs/zh/17-operation/10-monitor.md @@ -1,5 +1,6 @@ --- title: 系统监控 +description: 监控 TDengine 的运行状态 --- TDengine 通过 [taosKeeper](/reference/taosKeeper/) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度等信息定时写入指定数据库。TDengine 还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息进行记录。系统管理员可以从 CLI 直接查看这个数据库,也可以在 WEB 通过图形化界面查看这些监测信息。 diff --git a/docs/zh/17-operation/17-diagnose.md b/docs/zh/17-operation/17-diagnose.md index e6e9be7153..ec529096a7 100644 --- a/docs/zh/17-operation/17-diagnose.md +++ b/docs/zh/17-operation/17-diagnose.md @@ -1,5 +1,6 @@ --- title: 诊断及其他 +description: 一些常见问题的诊断技巧 --- ## 网络连接诊断 diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx index becb1a70a9..83f3f8bb25 100644 --- a/docs/zh/20-third-party/01-grafana.mdx +++ b/docs/zh/20-third-party/01-grafana.mdx @@ -1,6 +1,7 @@ --- sidebar_label: Grafana title: Grafana +description: 使用 Grafana 与 TDengine 的详细说明 --- import Tabs from "@theme/Tabs"; diff --git a/docs/zh/20-third-party/02-prometheus.md b/docs/zh/20-third-party/02-prometheus.md index 0fe534b8df..eb6c3bf1d0 100644 --- a/docs/zh/20-third-party/02-prometheus.md +++ b/docs/zh/20-third-party/02-prometheus.md @@ -1,6 +1,7 @@ --- sidebar_label: Prometheus title: Prometheus +description: 使用 Prometheus 访问 TDengine --- import Prometheus from "../14-reference/_prometheus.mdx" diff --git a/docs/zh/20-third-party/03-telegraf.md b/docs/zh/20-third-party/03-telegraf.md index 88a69211c0..84883e665a 100644 --- a/docs/zh/20-third-party/03-telegraf.md +++ b/docs/zh/20-third-party/03-telegraf.md @@ -1,6 +1,7 @@ --- sidebar_label: Telegraf title: Telegraf 写入 +description: 使用 Telegraf 向 TDengine 写入数据 --- import Telegraf from "../14-reference/_telegraf.mdx" diff --git a/docs/zh/20-third-party/05-collectd.md b/docs/zh/20-third-party/05-collectd.md index 04892fd42e..cc2235f260 100644 --- a/docs/zh/20-third-party/05-collectd.md +++ b/docs/zh/20-third-party/05-collectd.md @@ -1,6 +1,7 @@ --- sidebar_label: collectd title: collectd 写入 +description: 使用 collected 向 TDengine 写入数据 --- import CollectD from "../14-reference/_collectd.mdx" diff --git a/docs/zh/20-third-party/06-statsd.md b/docs/zh/20-third-party/06-statsd.md index 260d011835..122c9fd94c 100644 --- a/docs/zh/20-third-party/06-statsd.md +++ b/docs/zh/20-third-party/06-statsd.md @@ -1,6 +1,7 @@ --- sidebar_label: StatsD title: StatsD 直接写入 +description: 使用 StatsD 向 TDengine 写入 --- import StatsD from "../14-reference/_statsd.mdx" diff --git a/docs/zh/20-third-party/07-icinga2.md b/docs/zh/20-third-party/07-icinga2.md index ed1f1404a7..06ead57655 100644 --- a/docs/zh/20-third-party/07-icinga2.md +++ b/docs/zh/20-third-party/07-icinga2.md @@ -1,6 +1,7 @@ --- sidebar_label: icinga2 title: icinga2 写入 
+description: 使用 icinga2 写入 TDengine --- import Icinga2 from "../14-reference/_icinga2.mdx" diff --git a/docs/zh/20-third-party/08-tcollector.md b/docs/zh/20-third-party/08-tcollector.md index a1245e8c27..78d0b4a5df 100644 --- a/docs/zh/20-third-party/08-tcollector.md +++ b/docs/zh/20-third-party/08-tcollector.md @@ -1,6 +1,7 @@ --- sidebar_label: TCollector title: TCollector 写入 +description: 使用 TCollector 写入 TDengine --- import TCollector from "../14-reference/_tcollector.mdx" diff --git a/docs/zh/20-third-party/09-emq-broker.md b/docs/zh/20-third-party/09-emq-broker.md index f252e520a7..782a139e22 100644 --- a/docs/zh/20-third-party/09-emq-broker.md +++ b/docs/zh/20-third-party/09-emq-broker.md @@ -1,6 +1,7 @@ --- sidebar_label: EMQX Broker title: EMQX Broker 写入 +description: 使用 EMQX Broker 写入 TDengine --- MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/emqx)是一开源的 MQTT Broker 软件,无需任何代码,只需要在 EMQX Dashboard 里使用“规则”做简单配置,即可将 MQTT 的数据直接写入 TDengine。EMQX 支持通过 发送到 Web 服务的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。 diff --git a/docs/zh/20-third-party/10-hive-mq-broker.md b/docs/zh/20-third-party/10-hive-mq-broker.md index f75ed793d6..a388ff6daf 100644 --- a/docs/zh/20-third-party/10-hive-mq-broker.md +++ b/docs/zh/20-third-party/10-hive-mq-broker.md @@ -1,6 +1,7 @@ --- sidebar_label: HiveMQ Broker title: HiveMQ Broker 写入 +description: 使用 HivMQ Broker 写入 TDengine --- [HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。 diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md index 09bda4664f..1172f4fbc5 100644 --- a/docs/zh/20-third-party/11-kafka.md +++ b/docs/zh/20-third-party/11-kafka.md @@ -1,6 +1,7 @@ --- sidebar_label: Kafka -title: TDengine Kafka Connector 使用教程 +title: TDengine Kafka Connector +description: 使用 TDengine Kafka Connector 的详细指南 --- TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDengine Sink Connector。用户只需提供简单的配置文件,就可以将 Kafka 中指定 topic 的数据(批量或实时)同步到 TDengine, 或将 TDengine 中指定数据库的数据(批量或实时)同步到 Kafka。 diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md index a910c584d6..d74366d129 100644 --- a/docs/zh/21-tdinternal/01-arch.md +++ b/docs/zh/21-tdinternal/01-arch.md @@ -1,6 +1,7 @@ --- sidebar_label: 整体架构 title: 整体架构 +description: TDengine 架构设计,包括:集群、存储、缓存与持久化、数据备份、多级存储等 --- ## 集群与基本逻辑单元 diff --git a/docs/zh/21-tdinternal/03-high-availability.md b/docs/zh/21-tdinternal/03-high-availability.md index ba056b6f16..4cdf04f6d1 100644 --- a/docs/zh/21-tdinternal/03-high-availability.md +++ b/docs/zh/21-tdinternal/03-high-availability.md @@ -1,5 +1,6 @@ --- title: 高可用 +description: TDengine 的高可用设计 --- ## Vnode 的高可用性 diff --git a/docs/zh/21-tdinternal/05-load-balance.md b/docs/zh/21-tdinternal/05-load-balance.md index 2376dd3e61..07af2328d5 100644 --- a/docs/zh/21-tdinternal/05-load-balance.md +++ b/docs/zh/21-tdinternal/05-load-balance.md @@ -1,5 +1,6 @@ --- title: 负载均衡 +description: TDengine 的负载均衡设计 --- TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TDengine 采用 Hash 一致性算法将一个数据库中的所有表和子表的数据均衡分散在属于该数据库的所有 vgroup 中,每张表或子表只能由一个 vgroup 处理,一个 vgroup 可能负责处理多个表或子表。 @@ -7,7 +8,7 @@ TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TD 创建数据库时可以指定其中的 vgroup 的数量: ```sql -create database db0 vgroups 100; +create database db0 vgroups 20; ``` 如何指定合适的 vgroup 的数量,这取决于系统资源。假定系统中只计划建立一个数据库,则 vgroup 
数量由集群中所有 dnode 所能使用的资源决定。原则上可用的 CPU 和 Memory 越多,可建立的 vgroup 也越多。但也要考虑到磁盘性能,过多的 vgroup 在磁盘性能达到上限后反而会拖累整个系统的性能。假如系统中会建立多个数据库,则多个数据库的 vgroup 之和取决于系统中可用资源的数量。要综合考虑多个数据库之间表的数量、写入频率、数据量等多个因素在多个数据库之间分配 vgroup。实际中建议首先根据系统资源配置选择一个初始的 vgroup 数量,比如 CPU 总核数的 2 倍,以此为起点通过测试找到最佳的 vgroup 数量配置,此为系统中的 vgroup 总数。如果有多个数据库的话,再根据各个数据库的表数和数据量对 vgroup 进行分配。 diff --git a/docs/zh/21-tdinternal/index.md b/docs/zh/21-tdinternal/index.md index 63a746623e..21f106edc9 100644 --- a/docs/zh/21-tdinternal/index.md +++ b/docs/zh/21-tdinternal/index.md @@ -1,5 +1,6 @@ --- title: 技术内幕 +description: TDengine 的内部设计 --- ```mdx-code-block diff --git a/docs/zh/25-application/01-telegraf.md b/docs/zh/25-application/01-telegraf.md index a949fa9721..4e9597f964 100644 --- a/docs/zh/25-application/01-telegraf.md +++ b/docs/zh/25-application/01-telegraf.md @@ -1,6 +1,7 @@ --- sidebar_label: TDengine + Telegraf + Grafana -title: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统 +title: TDengine + Telegraf + Grafana +description: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统 --- ## 背景介绍 diff --git a/docs/zh/25-application/02-collectd.md b/docs/zh/25-application/02-collectd.md index 6bdebd5030..c6230f48ab 100644 --- a/docs/zh/25-application/02-collectd.md +++ b/docs/zh/25-application/02-collectd.md @@ -1,6 +1,7 @@ --- sidebar_label: TDengine + collectd/StatsD + Grafana -title: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统 +title: TDengine + collectd/StatsD + Grafana +description: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统 --- ## 背景介绍 diff --git a/docs/zh/25-application/index.md b/docs/zh/25-application/index.md index 1305cf230f..76aa179927 100644 --- a/docs/zh/25-application/index.md +++ b/docs/zh/25-application/index.md @@ -1,5 +1,6 @@ --- title: 应用实践 +description: TDengine 配合其它开源组件的一些应用示例 --- ```mdx-code-block diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 04ee011b93..2fd9dff80b 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -1,5 +1,6 @@ --- title: 常见问题及反馈 +description: 一些常见问题的解决方法汇总 --- ## 问题反馈 diff --git a/docs/zh/27-train-faq/index.md b/docs/zh/27-train-faq/index.md index b42bff0288..e7159d98c8 100644 --- a/docs/zh/27-train-faq/index.md +++ b/docs/zh/27-train-faq/index.md @@ -1,5 +1,6 @@ --- title: FAQ 及其他 +description: 用户经常遇到的问题 --- ```mdx-code-block diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 1e97572ca4..e3e1463131 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -1,6 +1,7 @@ --- sidebar_label: TDengine 发布历史 title: TDengine 发布历史 +description: TDengine 发布历史、Release Notes 及下载链接 --- import Release from "/components/ReleaseV3"; diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md index 7513333040..61129d74e5 100644 --- a/docs/zh/28-releases/02-tools.md +++ b/docs/zh/28-releases/02-tools.md @@ -1,6 +1,7 @@ --- sidebar_label: taosTools 发布历史 title: taosTools 发布历史 +description: taosTools 的发布历史、Release Notes 和下载链接 --- import Release from "/components/ReleaseV3"; From 45775a9e833f6268d4b777bd8ab5e6b83f5f83f8 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 25 Aug 2022 15:56:27 +0800 Subject: [PATCH 78/79] fix: release ofp when encoding and decoding --- source/libs/tdb/src/db/tdbBtree.c | 8 ++++++++ source/libs/tdb/src/db/tdbPCache.c | 28 ++++++++++++---------------- source/libs/tdb/src/db/tdbPage.c | 2 ++ 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/source/libs/tdb/src/db/tdbBtree.c 
b/source/libs/tdb/src/db/tdbBtree.c index 5430acb972..2ab5fd2e9c 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -934,6 +934,8 @@ static int tdbFetchOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) return -1; } + tdbPCacheRelease(pBt->pPager->pCache, *ppOfp, pTxn); + return ret; } @@ -1277,6 +1279,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, nLeft -= bytes; memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + + tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn); } } else { int nLeftKey = kLen; @@ -1336,6 +1340,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn); + nLeftKey -= bytes; nLeft -= bytes; } @@ -1374,6 +1380,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno)); + tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn); + nLeft -= bytes; } } diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c index 527ad200d4..76d95cbb91 100644 --- a/source/libs/tdb/src/db/tdbPCache.c +++ b/source/libs/tdb/src/db/tdbPCache.c @@ -111,6 +111,7 @@ void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn) { tdbPCacheLock(pCache); nRef = tdbUnrefPage(pPage); + tdbDebug("pcache/release page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef); if (nRef == 0) { // test the nRef again to make sure // it is safe th handle the page @@ -212,7 +213,8 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) pPage->pPager = pPageH->pPager; memcpy(pPage->pData, pPageH->pData, pPage->pageSize); - tdbDebug("pcache/pPageH: %p %d %p %p", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage); + tdbDebug("pcache/pPageH: %p %d %p %p %d", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage, + TDB_PAGE_PGNO(pPageH)); tdbPageInit(pPage, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize); pPage->kLen = pPageH->kLen; pPage->vLen = pPageH->vLen; @@ -243,7 +245,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable--; // printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbDebug("pin page %d", pPage->id); + tdbDebug("pcache/pin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); } } @@ -264,29 +266,23 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable++; // printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbDebug("unpin page %d", pPage->id); + tdbDebug("pcache/unpin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); } static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) { uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; SPage **ppPage = &(pCache->pgHash[h]); - if (*ppPage == pPage) { - pCache->pgHash[h] = pPage->pHashNext; - } else { - for (; (*ppPage) && (*ppPage)->pHashNext != pPage; ppPage = &((*ppPage)->pHashNext)) - ; - if (*ppPage) { - (*ppPage)->pHashNext = pPage->pHashNext; - } - } + for (; (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext)) + ; + if (*ppPage) { - pPage->pHashNext = NULL; - --pCache->nPage; + *ppPage = pPage->pHashNext; + pCache->nPage--; // printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); } - tdbDebug("remove page %p/%d from hash", pPage, pPage->id); + 
tdbDebug("pcache/remove page %p/%d/%d from hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h); } static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { @@ -298,7 +294,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { pCache->nPage++; // printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbDebug("add page %p/%d to hash", pPage, pPage->id); + tdbDebug("pcache/add page %p/%d/%d to hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h); } static int tdbPCacheOpenImpl(SPCache *pCache) { diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c index 9e0cd76573..a3f376b929 100644 --- a/source/libs/tdb/src/db/tdbPage.c +++ b/source/libs/tdb/src/db/tdbPage.c @@ -68,6 +68,8 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t) } *ppPage = pPage; + + tdbDebug("page/create: %p %p", pPage, xMalloc); return 0; } From fa2c4f70859c64683c78c8e491ef033dd69af14c Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 25 Aug 2022 16:34:43 +0800 Subject: [PATCH 79/79] doc: refine description --- docs/zh/07-develop/01-connect/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index 3e44e6c5da..075d99cfee 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -1,6 +1,6 @@ --- title: 建立连接 -description: "本节介绍如何使用连接器建立与 TDengine 的连接,给出连接器安装、连接的简单说明。" +description: 使用连接器建立与 TDengine 的连接,以及连接器的安装和连接 --- import Tabs from "@theme/Tabs";