From 68fe671cbc826d8392c84d77516eb6e345e66bce Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Thu, 1 Feb 2024 21:04:38 +0800 Subject: [PATCH 01/15] add tsz compress --- tests/army/community/cluster/snapshot.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py index 5b5457be75..26bb6b9377 100644 --- a/tests/army/community/cluster/snapshot.py +++ b/tests/army/community/cluster/snapshot.py @@ -29,7 +29,11 @@ from frame import * class TDTestCase(TBase): updatecfgDict = { - "countAlwaysReturnValue" : "0" + "countAlwaysReturnValue" : "0", + "lossyColumns" : "float|double", + "fPrecision" : "0.000000001", + "dPrecision" : "0.00000000000000001", + "ifAdtFse" : "1" } def insertData(self): From 3236ef7bae72d50aa526701784fd05d619447d77 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 2 Feb 2024 11:26:47 +0800 Subject: [PATCH 02/15] feat(stream): return the stream source task exec delay, and do some internal refactor. --- include/libs/executor/storageapi.h | 8 +-- include/libs/stream/tstream.h | 2 +- source/dnode/vnode/inc/vnode.h | 4 +- source/dnode/vnode/src/inc/tq.h | 7 +- source/dnode/vnode/src/inc/tsdb.h | 3 +- source/dnode/vnode/src/sma/smaRollup.c | 2 +- source/dnode/vnode/src/sma/smaTimeRange.c | 4 +- source/dnode/vnode/src/tq/tq.c | 3 +- source/dnode/vnode/src/tq/tqRead.c | 2 +- source/dnode/vnode/src/tq/tqSink.c | 8 +-- source/dnode/vnode/src/tq/tqUtil.c | 72 ++++++++++++++++++- source/dnode/vnode/src/tsdb/tsdbRead2.c | 6 +- source/dnode/vnode/src/vnd/vnodeInitApi.c | 6 +- source/dnode/vnode/src/vnd/vnodeQuery.c | 5 ++ .../executor/src/streameventwindowoperator.c | 1 - source/libs/parser/src/parTranslater.c | 58 +++++++++++---- source/libs/qworker/src/qworker.c | 7 ++ source/libs/stream/src/streamCheckpoint.c | 10 +-- source/libs/stream/src/streamQueue.c | 4 +- source/libs/wal/src/walRead.c | 11 ++- source/libs/wal/src/walWrite.c | 2 +- 21 files changed, 165 insertions(+), 60 deletions(-) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 669340f9e5..9987dab166 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -211,6 +211,7 @@ typedef struct SStoreTqReader { bool (*tqNextBlockImpl)(); // todo remove it SSDataBlock* (*tqGetResultBlock)(); int64_t (*tqGetResultBlockTime)(); + int32_t (*tqGetStreamExecProgress)(); void (*tqReaderSetColIdList)(); int32_t (*tqReaderSetQueryTableList)(); @@ -266,16 +267,11 @@ typedef struct SStoreMeta { // support filter and non-filter cases. 
[vnodeGetCtbIdList & vnodeGetCtbIdListByFilter] int32_t (*getChildTableList)(void* pVnode, int64_t suid, SArray* list); int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); - void* storeGetVersionRange; - void* storeGetLastTimestamp; - - int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema + int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); int32_t (*getNumOfChildTables)(void* pVnode, int64_t uid, int64_t* numOfTables, int32_t* numOfCols); void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables); - int64_t (*getNumOfRowsInMem)(void* pVnode); - SMCtbCursor* (*openCtbCursor)(void* pVnode, tb_uid_t uid, int lock); int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first); void (*pauseCtbCursor)(SMCtbCursor* pCtbCur); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 9738be839d..dce8fffe11 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -313,7 +313,7 @@ typedef struct SCheckpointInfo { int64_t failedId; // record the latest failed checkpoint id int64_t checkpointingId; int32_t downstreamAlignNum; - int32_t checkpointNotReadyTasks; + int32_t numOfNotReady; bool dispatchCheckpointTrigger; int64_t msgVer; int32_t transId; diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 97cf0ffebc..3c334be2f2 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -90,6 +90,8 @@ int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num); int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num); int32_t vnodeGetAllCtbNum(SVnode *pVnode, int64_t *num); +int32_t vnodeGetTableSchema(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); + void vnodeResetLoad(SVnode *pVnode, SVnodeLoad *pLoad); int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad); int32_t vnodeGetLoadLite(SVnode *pVnode, SVnodeLoadLite *pLoad); @@ -180,7 +182,6 @@ int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds, SArray *pTableUids); void *tsdbCacherowsReaderClose(void *pReader); -int32_t tsdbGetTableSchema(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity); size_t tsdbCacheGetCapacity(SVnode *pVnode); @@ -233,6 +234,7 @@ int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, i bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids); int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char *idstr); int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet); +int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, bool* fhFinished); // sma int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index cded4ddd7c..475a26aff5 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -97,7 +97,6 @@ typedef struct { struct STQ { SVnode* pVnode; char* path; - int64_t walLogLastVer; SRWLatch lock; SHashObj* pPushMgr; // subKey -> STqHandle SHashObj* pHandle; // subKey -> STqHandle @@ -153,14 +152,14 @@ char* tqOffsetBuildFName(const 
char* path, int32_t fVer); int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); // tq util -int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type); +int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type); int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg); int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch, int64_t consumerId, int32_t type, int64_t sver, int64_t ever); int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset); void tqUpdateNodeStage(STQ* pTq, bool isLeader); -int32_t setDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, - SSubmitTbData* pTableData, const char* id); +int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, + SSubmitTbData* pTableData, const char* id); int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, const char* id); SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols, diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 9d8d5013fa..cac3be9ee3 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -279,6 +279,7 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx); // tsdbRead.c ============================================================================================== int32_t tsdbTakeReadSnap2(STsdbReader *pReader, _query_reseek_func_t reseek, STsdbReadSnap **ppSnap); void tsdbUntakeReadSnap2(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proactive); +int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbMerge.c ============================================================================================== typedef struct { @@ -970,8 +971,6 @@ static FORCE_INLINE TSDBROW *tsdbTbDataIterGet(STbDataIter *pIter) { return pIter->pRow; } -int32_t tRowInfoCmprFn(const void *p1, const void *p2); - typedef struct { int64_t suid; int64_t uid; diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 138bcbb133..621651507e 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -1554,7 +1554,7 @@ static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SA } _resume_delete: version = RSMA_EXEC_MSG_VER(msg); - if ((terrno = extractDelDataBlock(RSMA_EXEC_MSG_BODY(msg), RSMA_EXEC_MSG_LEN(msg), version, + if ((terrno = tqExtractDelDataBlock(RSMA_EXEC_MSG_BODY(msg), RSMA_EXEC_MSG_LEN(msg), version, &packData.pDataBlock, 1))) { taosFreeQitem(msg); goto _err; diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index f537ede8c1..767ea47e21 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -203,7 +203,7 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * int32_t *index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId)); if (index == NULL) { // no data yet, append it - code = setDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, ""); + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, ""); if (code != TSDB_CODE_SUCCESS) { continue; } @@ -213,7 +213,7 @@ int32_t 
smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * int32_t size = (int32_t)taosArrayGetSize(pReq->aSubmitTbData) - 1; taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size)); } else { - code = setDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, ""); + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, ""); if (code != TSDB_CODE_SUCCESS) { continue; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 8689c30a55..bde6889ecd 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -66,7 +66,6 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { pTq->path = taosStrdup(path); pTq->pVnode = pVnode; - pTq->walLogLastVer = pVnode->pWal->vers.lastVer; pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); taosHashSetFreeFp(pTq->pHandle, tqDestroyTqHandle); @@ -1055,7 +1054,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { int32_t code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode)); // let's continue scan data in the wal files - if(code == 0 && pReq->reqType >= 0){ + if (code == 0 && (pReq->reqType >= 0 || pReq->reqType == STREAM_EXEC_T_RESUME_TASK)) { tqScanWalAsync(pTq, false); } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 383a636f71..8392f4c479 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -344,7 +344,7 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con void* pBody = POINTER_SHIFT(pCont->body, sizeof(SMsgHead)); int32_t len = pCont->bodyLen - sizeof(SMsgHead); - code = extractDelDataBlock(pBody, len, ver, (void**)pItem, 0); + code = tqExtractDelDataBlock(pBody, len, ver, (void**)pItem, 0); if (code == TSDB_CODE_SUCCESS) { if (*pItem == NULL) { tqDebug("s-task:%s empty delete msg, discard it, len:%d, ver:%" PRId64, id, len, ver); diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 7fcb86d84a..7050870c57 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -746,7 +746,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat return TDB_CODE_SUCCESS; } -int32_t setDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, +int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, SSubmitTbData* pTableData, const char* id) { int32_t numOfRows = pDataBlock->info.rows; @@ -821,7 +821,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { continue; } - code = setDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, id); + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, id); if (code != TSDB_CODE_SUCCESS) { continue; } @@ -868,7 +868,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { continue; } - code = setDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, id); + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, id); if (code != TSDB_CODE_SUCCESS) { continue; } @@ -878,7 +878,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1; taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size)); } else { - code = setDstTableDataPayload(suid, pTSchema, i, 
pDataBlock, &tbData, id); + code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, id); if (code != TSDB_CODE_SUCCESS) { continue; } diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index d18455d221..b9f578a74b 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -388,7 +388,7 @@ int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* return 0; } -int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type) { +int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type) { SDecoder* pCoder = &(SDecoder){0}; SDeleteRes* pRes = &(SDeleteRes){0}; @@ -449,3 +449,73 @@ int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** return TSDB_CODE_SUCCESS; } + +int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, bool* fhFinished) { + SStreamMeta* pMeta = pVnode->pTq->pStreamMeta; + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + int32_t code = TSDB_CODE_SUCCESS; + + if (pDelay != NULL) { + *pDelay = 0; + } + + *fhFinished = false; + + if (numOfTasks <= 0) { + return code; + } + + // extract the required source task for a given stream, identified by streamId + for (int32_t i = 0; i < numOfTasks; ++i) { + STaskId* pId = taosArrayGet(pMeta->pTaskList, i); + if (pId->streamId != streamId) { + continue; + } + + SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId)); + if (ppTask == NULL) { + tqError("vgId:%d failed to acquire task:0x%" PRIx64 " in retrieving progress", pMeta->vgId, pId->taskId); + continue; + } + + if ((*ppTask)->info.taskLevel != TASK_LEVEL__SOURCE) { + continue; + } + + // here we get the required stream source task + SStreamTask* pTask = *ppTask; + *fhFinished = !HAS_RELATED_FILLHISTORY_TASK(pTask); + + int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader); + + SVersionRange verRange = {0}; + walReaderValidVersionRange(pTask->exec.pWalReader, &verRange.minVer, &verRange.maxVer); + + SWalReader* pReader = walOpenReader(pTask->exec.pWalReader->pWal, NULL, 0); + if (pReader == NULL) { + tqError("failed to open wal reader to extract exec progress, vgId:%d", pMeta->vgId); + continue; + } + + int64_t cur = 0; + int64_t latest = 0; + + code = walFetchHead(pReader, ver); + if (code != TSDB_CODE_SUCCESS) { + cur = pReader->pHead->head.ingestTs; + } + + code = walFetchHead(pReader, verRange.maxVer); + if (code != TSDB_CODE_SUCCESS) { + latest = pReader->pHead->head.ingestTs; + } + + if (pDelay != NULL) { // delay in ms + *pDelay = (latest - cur) / 1000; + } + + walCloseReader(pReader); + } + + return TSDB_CODE_SUCCESS; +} diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 9d158668d2..d9b932a367 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -4995,9 +4995,9 @@ int64_t tsdbGetNumOfRowsInMemTable2(STsdbReader* pReader) { return rows; } -int32_t tsdbGetTableSchema(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid) { +int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_t* suid) { SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, pMeta, 0); int32_t code = metaReaderGetTableEntryByUidCache(&mr, uid); if (code != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; @@ -5027,7 +5027,7 @@ int32_t tsdbGetTableSchema(void* 
pVnode, int64_t uid, STSchema** pSchema, int64_ metaReaderClear(&mr); // get the newest table schema version - code = metaGetTbTSchemaEx(((SVnode*)pVnode)->pMeta, *suid, uid, -1, pSchema); + code = metaGetTbTSchemaEx(pMeta, *suid, uid, -1, pSchema); return code; } diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c index c323a81093..2392716bbf 100644 --- a/source/dnode/vnode/src/vnd/vnodeInitApi.c +++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c @@ -91,7 +91,7 @@ void initMetadataAPI(SStoreMeta* pMeta) { pMeta->getTableTypeByName = metaGetTableTypeByName; pMeta->getTableNameByUid = metaGetTableNameByUid; - pMeta->getTableSchema = tsdbGetTableSchema; // todo refactor + pMeta->getTableSchema = vnodeGetTableSchema; pMeta->storeGetTableList = vnodeGetTableList; pMeta->getCachedTableList = metaGetCachedTableUidList; @@ -135,7 +135,9 @@ void initTqAPI(SStoreTqReader* pTq) { pTq->tqReaderNextBlockFilterOut = tqNextDataBlockFilterOut; pTq->tqGetResultBlockTime = tqGetResultBlockTime; -} + + pTq->tqGetStreamExecProgress = tqGetStreamExecInfo; + } void initStateStoreAPI(SStateStore* pStore) { pStore->streamFileStateInit = streamFileStateInit; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index b6a9360afd..4fc7a88494 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -14,6 +14,7 @@ */ #include "vnd.h" +#include "tsdb.h" #define VNODE_GET_LOAD_RESET_VALS(pVar, oVal, vType, tags) \ do { \ @@ -703,3 +704,7 @@ void *vnodeGetIvtIdx(void *pVnode) { } return metaGetIvtIdx(((SVnode *)pVnode)->pMeta); } + +int32_t vnodeGetTableSchema(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid) { + return tsdbGetTableSchema(((SVnode*)pVnode)->pMeta, uid, pSchema, suid); +} diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 8aca76597b..0602016268 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -725,7 +725,6 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys } if (pInfo->isHistoryOp) { - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pAllUpdated = tSimpleHashInit(64, hashFn); } else { pInfo->pAllUpdated = NULL; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index e221d2158c..0047fdb514 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -8090,27 +8090,27 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta tstrncpy(col->tableAlias, pTable, tListLen(col->tableAlias)); tstrncpy(col->colName, pMeta->schema[0].name, tListLen(col->colName)); - SNodeList* pParamterList = nodesMakeList(); - if (NULL == pParamterList) { + SNodeList* pParameterList = nodesMakeList(); + if (NULL == pParameterList) { nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); + int32_t code = nodesListStrictAppend(pParameterList, (SNode*)col); if (code) { - nodesDestroyList(pParamterList); + nodesDestroyList(pParameterList); return code; } - SNode* pFunc = (SNode*)createFunction("last", pParamterList); + SNode* pFunc = (SNode*)createFunction("last", pParameterList); if (NULL == pFunc) { - nodesDestroyList(pParamterList); + nodesDestroyList(pParameterList); return 
TSDB_CODE_OUT_OF_MEMORY; } SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { - nodesDestroyList(pParamterList); + nodesDestroyNode(pFunc); return TSDB_CODE_OUT_OF_MEMORY; } @@ -8122,7 +8122,7 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta SFunctionNode* pFunc1 = createFunction("_vgid", NULL); if (NULL == pFunc1) { - nodesDestroyList(pParamterList); + nodesDestroyList(pProjectionList); return TSDB_CODE_OUT_OF_MEMORY; } @@ -8135,7 +8135,7 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta SFunctionNode* pFunc2 = createFunction("_vgver", NULL); if (NULL == pFunc2) { - nodesDestroyList(pParamterList); + nodesDestroyList(pProjectionList); return TSDB_CODE_OUT_OF_MEMORY; } @@ -8152,24 +8152,54 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta return code; } - // todo add the group by statement SSelectStmt** pSelect1 = (SSelectStmt**)pQuery; (*pSelect1)->pGroupByList = nodesMakeList(); + if (NULL == (*pSelect1)->pGroupByList) { + return TSDB_CODE_OUT_OF_MEMORY; + } SGroupingSetNode* pNode1 = (SGroupingSetNode*)nodesMakeNode(QUERY_NODE_GROUPING_SET); + if (NULL == pNode1) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pNode1->groupingSetType = GP_TYPE_NORMAL; pNode1->pParameterList = nodesMakeList(); - nodesListAppend(pNode1->pParameterList, (SNode*)pFunc1); + if (NULL == pNode1->pParameterList) { + nodesDestroyNode((SNode*)pNode1); + return TSDB_CODE_OUT_OF_MEMORY; + } - nodesListAppend((*pSelect1)->pGroupByList, (SNode*)pNode1); + code = nodesListAppend(pNode1->pParameterList, (SNode*)pFunc1); + if (code) { + nodesDestroyNode((SNode*)pNode1); + return code; + } + + code = nodesListAppend((*pSelect1)->pGroupByList, (SNode*)pNode1); + if (code) { + return code; + } SGroupingSetNode* pNode2 = (SGroupingSetNode*)nodesMakeNode(QUERY_NODE_GROUPING_SET); + if (NULL == pNode2) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pNode2->groupingSetType = GP_TYPE_NORMAL; pNode2->pParameterList = nodesMakeList(); - nodesListAppend(pNode2->pParameterList, (SNode*)pFunc2); + if (NULL == pNode2->pParameterList) { + nodesDestroyNode((SNode*)pNode1); + return TSDB_CODE_OUT_OF_MEMORY; + } - nodesListAppend((*pSelect1)->pGroupByList, (SNode*)pNode2); + code = nodesListAppend(pNode2->pParameterList, (SNode*)pFunc2); + if (code) { + nodesDestroyNode((SNode*)pNode2); + return code; + } + code = nodesListAppend((*pSelect1)->pGroupByList, (SNode*)pNode2); return code; } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 7376aa3a9c..93559745be 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -737,6 +737,13 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { QW_ERR_JRET(code); } +#if 0 + SReadHandle* pReadHandle = qwMsg->node; + int64_t delay = 0; + bool fhFinish = false; + pReadHandle->api.tqReaderFn.tqGetStreamExecProgress(pReadHandle->vnode, 0, &delay, &fhFinish); +#endif + code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, sql, OPTR_EXEC_MODEL_BATCH); sql = NULL; if (code) { diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index f45904f036..b1783fb640 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -158,7 +158,7 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo pTask->chkInfo.transId = pReq->transId; pTask->chkInfo.checkpointingId = 
pReq->checkpointId; - pTask->chkInfo.checkpointNotReadyTasks = streamTaskGetNumOfDownstream(pTask); + pTask->chkInfo.numOfNotReady = streamTaskGetNumOfDownstream(pTask); pTask->chkInfo.startTs = taosGetTimestampMs(); pTask->execInfo.checkpoint += 1; @@ -214,7 +214,7 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc stDebug("s-task:%s set childIdx:%d, and add checkpoint-trigger block into outputQ", id, pTask->info.selfChildId); continueDispatchCheckpointBlock(pBlock, pTask); } else { // only one task exists, no need to dispatch downstream info - atomic_add_fetch_32(&pTask->chkInfo.checkpointNotReadyTasks, 1); + atomic_add_fetch_32(&pTask->chkInfo.numOfNotReady, 1); streamProcessCheckpointReadyMsg(pTask); streamFreeQitem((SStreamQueueItem*)pBlock); } @@ -249,7 +249,7 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc // set the needed checked downstream tasks, only when all downstream tasks do checkpoint complete, this task // can start local checkpoint procedure - pTask->chkInfo.checkpointNotReadyTasks = streamTaskGetNumOfDownstream(pTask); + pTask->chkInfo.numOfNotReady = streamTaskGetNumOfDownstream(pTask); // Put the checkpoint block into inputQ, to make sure all blocks with less version have been handled by this task // already. And then, dispatch check point msg to all downstream tasks @@ -268,7 +268,7 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask) { ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG); // only when all downstream tasks are send checkpoint rsp, we can start the checkpoint procedure for the agg task - int32_t notReady = atomic_sub_fetch_32(&pTask->chkInfo.checkpointNotReadyTasks, 1); + int32_t notReady = atomic_sub_fetch_32(&pTask->chkInfo.numOfNotReady, 1); ASSERT(notReady >= 0); if (notReady == 0) { @@ -287,7 +287,7 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) { pTask->chkInfo.checkpointingId = 0; // clear the checkpoint id pTask->chkInfo.failedId = 0; pTask->chkInfo.startTs = 0; // clear the recorded start time - pTask->chkInfo.checkpointNotReadyTasks = 0; + pTask->chkInfo.numOfNotReady = 0; pTask->chkInfo.transId = 0; pTask->chkInfo.dispatchCheckpointTrigger = false; diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 78929c365e..0936d410bf 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -155,14 +155,14 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu *blockSize = 0; // no available token in bucket for sink task, let's wait for a little bit - if (taskLevel == TASK_LEVEL__SINK && (!streamTaskExtractAvailableToken(pTask->outputInfo.pTokenBucket, pTask->id.idStr))) { + if (taskLevel == TASK_LEVEL__SINK && (!streamTaskExtractAvailableToken(pTask->outputInfo.pTokenBucket, id))) { stDebug("s-task:%s no available token in bucket for sink data, wait for 10ms", id); return TSDB_CODE_SUCCESS; } while (1) { if (streamTaskShouldPause(pTask) || streamTaskShouldStop(pTask)) { - stDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks); + stDebug("s-task:%s task should pause, extract input blocks:%d", id, *numOfBlocks); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 3854e90901..d491b00e73 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -70,10 +70,9 @@ int32_t 
walNextValidMsg(SWalReader *pReader) { int64_t committedVer = walGetCommittedVer(pReader->pWal); int64_t appliedVer = walGetAppliedVer(pReader->pWal); - wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 - ", applied index:%" PRId64, + wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last:%" PRId64 " commit:%" PRId64 ", applied:%" PRId64, pReader->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer); - if (fetchVer > appliedVer){ + if (fetchVer > appliedVer) { terrno = TSDB_CODE_WAL_LOG_NOT_EXIST; return -1; } @@ -86,10 +85,8 @@ int32_t walNextValidMsg(SWalReader *pReader) { int32_t type = pReader->pHead->head.msgType; if (type == TDMT_VND_SUBMIT || ((type == TDMT_VND_DELETE) && (pReader->cond.deleteMsg == 1)) || (IS_META_MSG(type) && pReader->cond.scanMeta)) { - if (walFetchBody(pReader) < 0) { - return -1; - } - return 0; + int32_t code = walFetchBody(pReader); + return (code == TSDB_CODE_SUCCESS)? 0:-1; } else { if (walSkipFetchBody(pReader) < 0) { return -1; diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 341d989f8f..9783705bad 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -498,7 +498,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy pWal->writeHead.head.version = index; pWal->writeHead.head.bodyLen = bodyLen; pWal->writeHead.head.msgType = msgType; - pWal->writeHead.head.ingestTs = 0; + pWal->writeHead.head.ingestTs = taosGetTimestampUs(); // sync info for sync module pWal->writeHead.head.syncMeta = syncMeta; From b852993b8d9ce074f660a225dc657a31fd95b457 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 13:22:39 +0800 Subject: [PATCH 03/15] remove TD_TSZ macro define --- include/util/tcompression.h | 1 - source/dnode/mgmt/node_mgmt/src/dmMgmt.c | 6 - source/util/src/tcompression.c | 24 +--- .../army/community/storage/oneStageComp.json | 66 ++++++++++ tests/army/community/storage/oneStageComp.py | 123 ++++++++++++++++++ tests/parallel_test/cases.task | 2 +- 6 files changed, 192 insertions(+), 30 deletions(-) create mode 100644 tests/army/community/storage/oneStageComp.json create mode 100644 tests/army/community/storage/oneStageComp.py diff --git a/include/util/tcompression.h b/include/util/tcompression.h index 75ddbb12e7..79fe9b613b 100644 --- a/include/util/tcompression.h +++ b/include/util/tcompression.h @@ -55,7 +55,6 @@ extern "C" { #define HEAD_MODE(x) x % 2 #define HEAD_ALGO(x) x / 2 -#ifdef TD_TSZ extern bool lossyFloat; extern bool lossyDouble; int32_t tsCompressInit(char* lossyColumns, float fPrecision, double dPrecision, uint32_t maxIntervals, uint32_t intervals, diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index 6cbf31b15f..2eab045251 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -19,10 +19,8 @@ #include "index.h" #include "qworker.h" #include "tstream.h" -#ifdef TD_TSZ #include "tcompression.h" #include "tglobal.h" -#endif static bool dmRequireNode(SDnode *pDnode, SMgmtWrapper *pWrapper) { SMgmtInputOpt input = dmBuildMgmtInputOpt(pWrapper); @@ -47,10 +45,8 @@ int32_t dmInitDnode(SDnode *pDnode) { goto _OVER; } -#ifdef TD_TSZ // compress module init tsCompressInit(tsLossyColumns, tsFPrecision, tsDPrecision, tsMaxRange, tsCurRange, (int)tsIfAdtFse, tsCompressor); -#endif pDnode->wrappers[DNODE].func = dmGetMgmtFunc(); pDnode->wrappers[MNODE].func = 
mmGetMgmtFunc(); @@ -119,10 +115,8 @@ void dmCleanupDnode(SDnode *pDnode) { indexCleanup(); taosConvDestroy(); -#ifdef TD_TSZ // compress destroy tsCompressExit(); -#endif dDebug("dnode is closed, ptr:%p", pDnode); } diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 3cc00ddc7f..5cb92a4d47 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -54,9 +54,7 @@ #include "tlog.h" #include "ttypes.h" -#ifdef TD_TSZ #include "td_sz.h" -#endif static const int32_t TEST_NUMBER = 1; #define is_bigendian() ((*(char *)&TEST_NUMBER) == 0) @@ -64,7 +62,6 @@ static const int32_t TEST_NUMBER = 1; #define safeInt64Add(a, b) (((a >= 0) && (b <= INT64_MAX - a)) || ((a < 0) && (b >= INT64_MIN - a))) -#ifdef TD_TSZ bool lossyFloat = false; bool lossyDouble = false; @@ -83,7 +80,6 @@ int32_t tsCompressInit(char* lossyColumns, float fPrecision, double dPrecision, // exit call void tsCompressExit() { tdszExit(); } -#endif /* * Compress Integer (Simple8B). @@ -906,7 +902,6 @@ int32_t tsDecompressFloatImp(const char *const input, const int32_t nelements, c return nelements * FLOAT_BYTES; } -#ifdef TD_TSZ // // ---------- float double lossy ----------- // @@ -977,6 +972,7 @@ int32_t tsDecompressDoubleLossyImp(const char *input, int32_t compressedSize, co } #endif +#ifdef BUILD_NO_CALL /************************************************************************* * STREAM COMPRESSION *************************************************************************/ @@ -2120,7 +2116,7 @@ int32_t tCompressEnd(SCompressor *pCmprsor, const uint8_t **ppOut, int32_t *nOut int32_t tCompress(SCompressor *pCmprsor, const void *pData, int64_t nData) { return DATA_TYPE_INFO[pCmprsor->type].cmprFn(pCmprsor, pData, nData); } - +#endif /************************************************************************* * REGULAR COMPRESSION *************************************************************************/ @@ -2154,13 +2150,11 @@ int32_t tsDecompressTimestamp(void *pIn, int32_t nIn, int32_t nEle, void *pOut, // Float ===================================================== int32_t tsCompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { -#ifdef TD_TSZ // lossy mode if (lossyFloat) { return tsCompressFloatLossyImp(pIn, nEle, pOut); // lossless mode } else { -#endif if (cmprAlg == ONE_STAGE_COMP) { return tsCompressFloatImp(pIn, nEle, pOut); } else if (cmprAlg == TWO_STAGE_COMP) { @@ -2170,19 +2164,15 @@ int32_t tsCompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_ ASSERTS(0, "compress algo invalid"); return -1; } -#ifdef TD_TSZ } -#endif } int32_t tsDecompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { -#ifdef TD_TSZ if (HEAD_ALGO(((uint8_t *)pIn)[0]) == ALGO_SZ_LOSSY) { // decompress lossy return tsDecompressFloatLossyImp(pIn, nIn, nEle, pOut); } else { -#endif // decompress lossless if (cmprAlg == ONE_STAGE_COMP) { return tsDecompressFloatImp(pIn, nEle, pOut); @@ -2193,20 +2183,16 @@ int32_t tsDecompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int3 ASSERTS(0, "compress algo invalid"); return -1; } -#ifdef TD_TSZ } -#endif } // Double ===================================================== int32_t tsCompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { -#ifdef TD_TSZ if (lossyDouble) { // lossy mode return tsCompressDoubleLossyImp(pIn, nEle, pOut); } 
else { -#endif // lossless mode if (cmprAlg == ONE_STAGE_COMP) { return tsCompressDoubleImp(pIn, nEle, pOut); @@ -2217,19 +2203,15 @@ int32_t tsCompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32 ASSERTS(0, "compress algo invalid"); return -1; } -#ifdef TD_TSZ } -#endif } int32_t tsDecompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { -#ifdef TD_TSZ if (HEAD_ALGO(((uint8_t *)pIn)[0]) == ALGO_SZ_LOSSY) { // decompress lossy return tsDecompressDoubleLossyImp(pIn, nIn, nEle, pOut); } else { -#endif // decompress lossless if (cmprAlg == ONE_STAGE_COMP) { return tsDecompressDoubleImp(pIn, nEle, pOut); @@ -2240,9 +2222,7 @@ int32_t tsDecompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int ASSERTS(0, "compress algo invalid"); return -1; } -#ifdef TD_TSZ } -#endif } // Binary ===================================================== diff --git a/tests/army/community/storage/oneStageComp.json b/tests/army/community/storage/oneStageComp.json new file mode 100644 index 0000000000..12fa51db83 --- /dev/null +++ b/tests/army/community/storage/oneStageComp.json @@ -0,0 +1,66 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 3000, + "prepared_rand": 3000, + "thread_count": 2, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 2, + "replica": 3, + "wal_retention_period": 10, + "wal_retention_size": 100, + "keep": "60d,120d,365d", + "stt_trigger": 1, + "wal_level": 2, + "WAL_FSYNC_PERIOD": 3300, + "cachemode": "last_value", + "TABLE_PREFIX":1, + "comp": 1 + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 10, + "insert_rows": 100000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 1000, + "start_timestamp":"2023-01-01 00:00:00", + "columns": [ + { "type": "bool", "name": "bc","max": 1,"min": 1}, + { "type": "float", "name": "fc" ,"max": 101,"min": 101}, + { "type": "double", "name": "dc" ,"max": 102,"min": 102}, + { "type": "tinyint", "name": "ti" ,"max": 103,"min": 103}, + { "type": "smallint", "name": "si" ,"max": 104,"min": 104}, + { "type": "int", "name": "ic" ,"max": 105,"min": 105}, + { "type": "bigint", "name": "bi" ,"max": 106,"min": 106}, + { "type": "utinyint", "name": "uti","max": 107,"min": 107}, + { "type": "usmallint", "name": "usi","max": 108,"min": 108}, + { "type": "uint", "name": "ui" ,"max": 109,"min": 109}, + { "type": "ubigint", "name": "ubi","max": 110,"min": 110}, + { "type": "binary", "name": "bin", "len": 16}, + { "type": "nchar", "name": "nch", "len": 32} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 100,"min": 100}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/army/community/storage/oneStageComp.py b/tests/army/community/storage/oneStageComp.py new file mode 100644 index 0000000000..f3718bc716 --- /dev/null +++ b/tests/army/community/storage/oneStageComp.py @@ -0,0 +1,123 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import random + +import taos +import frame +import frame.etool + + +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * + + +class TDTestCase(TBase): + updatecfgDict = { + "compressMsgSize" : "100", + } + + def insertData(self): + tdLog.info(f"insert data.") + # taosBenchmark run + jfile = etool.curFile(__file__, "oneStageComp.json") + etool.benchMark(json=jfile) + + tdSql.execute(f"use {self.db}") + # set insert data information + self.childtable_count = 10 + self.insert_rows = 100000 + self.timestamp_step = 1000 + + + + def checkColValueCorrect(self): + tdLog.info(f"do action.") + self.flushDb() + + # check all columns correct + cnt = self.insert_rows * self.childtable_count + sql = "select * from stb where bc!=1" + tdSql.checkRows(0) + sql = "select * from stb where fc=101" + tdSql.checkRows(cnt) + sql = "select * from stb where dc!=102" + tdSql.checkRows(0) + sql = "select * from stb where ti!=103" + tdSql.checkRows(0) + sql = "select * from stb where si!=104" + tdSql.checkRows(0) + sql = "select * from stb where ic!=105" + tdSql.checkRows(0) + sql = "select * from stb where b!i=106" + tdSql.checkRows(0) + sql = "select * from stb where uti!=107" + tdSql.checkRows(0) + sql = "select * from stb where usi!=108" + tdSql.checkRows(0) + sql = "select * from stb where ui!=109" + tdSql.checkRows(0) + sql = "select * from stb where ubi!=110" + tdSql.checkRows(0) + + def insertNull(self): + # insert 6 lines + sql = "insert into d0(ts) values(now) (now + 1s) (now + 2s) (now + 3s) (now + 4s) (now + 5s)" + tdSql.execute(sql) + + self.flushDb() + self.trimDb() + + # check all columns correct + cnt = self.insert_rows * self.childtable_count + sql = "select * from stb where bc!=1" + tdSql.checkRows(6) + sql = "select * from stb where bc=1" + tdSql.checkRows(cnt) + sql = "select * from stb where usi!=108" + tdSql.checkRows(6) + + # run + def run(self): + tdLog.debug(f"start to excute {__file__}") + + # insert data + self.insertData() + + # check insert data correct + self.checkInsertCorrect() + + # save + self.snapshotAgg() + + # do action + self.checkColValueCorrect() + + # check save agg result correct + self.checkAggCorrect() + + # insert null + self.insertNull() + + + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 91a0ac46e5..9ea03b4e6b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -23,7 +23,7 @@ fi ,,y,army,./pytest.sh python3 ./test.py -f community/query/query_basic.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f community/cluster/splitVgroupByLearner.py -N 3 ,,n,army,python3 ./test.py -f community/cmdline/fullopt.py - +,,y,army,./pytest.sh python3 ./test.py -f community/storage/oneStageComp.py -N 3 -L 3 -D 1 # From 8559e7061ce55f1c12582bdd1659beec0870743e Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 13:26:36 +0800 Subject: [PATCH 04/15] fix: , replace | with float,double --- 
tests/army/community/cluster/snapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py index 26bb6b9377..3f9a497f16 100644 --- a/tests/army/community/cluster/snapshot.py +++ b/tests/army/community/cluster/snapshot.py @@ -30,7 +30,7 @@ from frame import * class TDTestCase(TBase): updatecfgDict = { "countAlwaysReturnValue" : "0", - "lossyColumns" : "float|double", + "lossyColumns" : "float,double", "fPrecision" : "0.000000001", "dPrecision" : "0.00000000000000001", "ifAdtFse" : "1" From 656af515ceed48eea2e85beff44dcf549128672f Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 13:34:14 +0800 Subject: [PATCH 05/15] fix: build error --- include/util/tcompression.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/util/tcompression.h b/include/util/tcompression.h index 79fe9b613b..9948102494 100644 --- a/include/util/tcompression.h +++ b/include/util/tcompression.h @@ -91,7 +91,6 @@ static FORCE_INLINE int32_t tsDecompressDoubleLossy(const char *const input, int return tsDecompressDoubleLossyImp(input, compressedSize, nelements, output); } -#endif /************************************************************************* * REGULAR COMPRESSION From 3ae02aaac24826c2c8514c73a1b02386c2ccc9cd Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 13:38:13 +0800 Subject: [PATCH 06/15] fix: build error1 --- source/util/src/tcompression.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 5cb92a4d47..f605c60ccd 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -970,7 +970,6 @@ int32_t tsDecompressDoubleLossyImp(const char *input, int32_t compressedSize, co // decompressed with sz return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output); } -#endif #ifdef BUILD_NO_CALL /************************************************************************* From b4621bb37c8186c0a342cb9e27b0f4563a885005 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 13:57:16 +0800 Subject: [PATCH 07/15] fix: restore TD_TSZ macro define --- include/util/tcompression.h | 2 ++ source/dnode/mgmt/node_mgmt/src/dmMgmt.c | 6 ++++++ source/util/src/tcompression.c | 22 ++++++++++++++++++++++ 3 files changed, 30 insertions(+) diff --git a/include/util/tcompression.h b/include/util/tcompression.h index 9948102494..75ddbb12e7 100644 --- a/include/util/tcompression.h +++ b/include/util/tcompression.h @@ -55,6 +55,7 @@ extern "C" { #define HEAD_MODE(x) x % 2 #define HEAD_ALGO(x) x / 2 +#ifdef TD_TSZ extern bool lossyFloat; extern bool lossyDouble; int32_t tsCompressInit(char* lossyColumns, float fPrecision, double dPrecision, uint32_t maxIntervals, uint32_t intervals, @@ -91,6 +92,7 @@ static FORCE_INLINE int32_t tsDecompressDoubleLossy(const char *const input, int return tsDecompressDoubleLossyImp(input, compressedSize, nelements, output); } +#endif /************************************************************************* * REGULAR COMPRESSION diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index 2eab045251..6cbf31b15f 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -19,8 +19,10 @@ #include "index.h" #include "qworker.h" #include "tstream.h" +#ifdef TD_TSZ #include "tcompression.h" #include "tglobal.h" +#endif static 
bool dmRequireNode(SDnode *pDnode, SMgmtWrapper *pWrapper) { SMgmtInputOpt input = dmBuildMgmtInputOpt(pWrapper); @@ -45,8 +47,10 @@ int32_t dmInitDnode(SDnode *pDnode) { goto _OVER; } +#ifdef TD_TSZ // compress module init tsCompressInit(tsLossyColumns, tsFPrecision, tsDPrecision, tsMaxRange, tsCurRange, (int)tsIfAdtFse, tsCompressor); +#endif pDnode->wrappers[DNODE].func = dmGetMgmtFunc(); pDnode->wrappers[MNODE].func = mmGetMgmtFunc(); @@ -115,8 +119,10 @@ void dmCleanupDnode(SDnode *pDnode) { indexCleanup(); taosConvDestroy(); +#ifdef TD_TSZ // compress destroy tsCompressExit(); +#endif dDebug("dnode is closed, ptr:%p", pDnode); } diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index f605c60ccd..656e2706f2 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -54,7 +54,9 @@ #include "tlog.h" #include "ttypes.h" +#ifdef TD_TSZ #include "td_sz.h" +#endif static const int32_t TEST_NUMBER = 1; #define is_bigendian() ((*(char *)&TEST_NUMBER) == 0) @@ -62,6 +64,7 @@ static const int32_t TEST_NUMBER = 1; #define safeInt64Add(a, b) (((a >= 0) && (b <= INT64_MAX - a)) || ((a < 0) && (b >= INT64_MIN - a))) +#ifdef TD_TSZ bool lossyFloat = false; bool lossyDouble = false; @@ -80,6 +83,7 @@ int32_t tsCompressInit(char* lossyColumns, float fPrecision, double dPrecision, // exit call void tsCompressExit() { tdszExit(); } +#endif /* * Compress Integer (Simple8B). @@ -902,6 +906,7 @@ int32_t tsDecompressFloatImp(const char *const input, const int32_t nelements, c return nelements * FLOAT_BYTES; } +#ifdef TD_TSZ // // ---------- float double lossy ----------- // @@ -970,6 +975,7 @@ int32_t tsDecompressDoubleLossyImp(const char *input, int32_t compressedSize, co // decompressed with sz return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output); } +#endif #ifdef BUILD_NO_CALL /************************************************************************* @@ -2149,11 +2155,13 @@ int32_t tsDecompressTimestamp(void *pIn, int32_t nIn, int32_t nEle, void *pOut, // Float ===================================================== int32_t tsCompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { +#ifdef TD_TSZ // lossy mode if (lossyFloat) { return tsCompressFloatLossyImp(pIn, nEle, pOut); // lossless mode } else { +#endif if (cmprAlg == ONE_STAGE_COMP) { return tsCompressFloatImp(pIn, nEle, pOut); } else if (cmprAlg == TWO_STAGE_COMP) { @@ -2163,15 +2171,19 @@ int32_t tsCompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_ ASSERTS(0, "compress algo invalid"); return -1; } +#ifdef TD_TSZ } +#endif } int32_t tsDecompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { +#ifdef TD_TSZ if (HEAD_ALGO(((uint8_t *)pIn)[0]) == ALGO_SZ_LOSSY) { // decompress lossy return tsDecompressFloatLossyImp(pIn, nIn, nEle, pOut); } else { +#endif // decompress lossless if (cmprAlg == ONE_STAGE_COMP) { return tsDecompressFloatImp(pIn, nEle, pOut); @@ -2182,16 +2194,20 @@ int32_t tsDecompressFloat(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int3 ASSERTS(0, "compress algo invalid"); return -1; } +#ifdef TD_TSZ } +#endif } // Double ===================================================== int32_t tsCompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { +#ifdef TD_TSZ if (lossyDouble) { // lossy mode return tsCompressDoubleLossyImp(pIn, nEle, 
pOut); } else { +#endif // lossless mode if (cmprAlg == ONE_STAGE_COMP) { return tsCompressDoubleImp(pIn, nEle, pOut); @@ -2202,15 +2218,19 @@ int32_t tsCompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32 ASSERTS(0, "compress algo invalid"); return -1; } +#ifdef TD_TSZ } +#endif } int32_t tsDecompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int32_t nOut, uint8_t cmprAlg, void *pBuf, int32_t nBuf) { +#ifdef TD_TSZ if (HEAD_ALGO(((uint8_t *)pIn)[0]) == ALGO_SZ_LOSSY) { // decompress lossy return tsDecompressDoubleLossyImp(pIn, nIn, nEle, pOut); } else { +#endif // decompress lossless if (cmprAlg == ONE_STAGE_COMP) { return tsDecompressDoubleImp(pIn, nEle, pOut); @@ -2221,7 +2241,9 @@ int32_t tsDecompressDouble(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int ASSERTS(0, "compress algo invalid"); return -1; } +#ifdef TD_TSZ } +#endif } // Binary ===================================================== From 6736fd1615b946d0a50cad8a6278dc008d4c3c87 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 4 Feb 2024 14:11:17 +0800 Subject: [PATCH 08/15] fix(tsdb): check and return if the rows in stt are before the data rows in data files. --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 126 +++++++++++++++--------- 1 file changed, 82 insertions(+), 44 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index d9b932a367..86f58717e2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -2628,6 +2628,58 @@ static bool moveToNextTableForPreFileSetMem(SReaderStatus* pStatus) { return (pStatus->pProcMemTableIter != NULL); } +static void buildCleanBlockFromSttFiles(STsdbReader* pReader, STableBlockScanInfo* pScanInfo) { + SReaderStatus* pStatus = &pReader->status; + SSttBlockReader* pSttBlockReader = pStatus->fileIter.pSttBlockReader; + SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; + + bool asc = ASCENDING_TRAVERSE(pReader->info.order); + + SDataBlockInfo* pInfo = &pResBlock->info; + blockDataEnsureCapacity(pResBlock, pScanInfo->numOfRowsInStt); + + pInfo->rows = pScanInfo->numOfRowsInStt; + pInfo->id.uid = pScanInfo->uid; + pInfo->dataLoad = 1; + pInfo->window = pScanInfo->sttWindow; + + setComposedBlockFlag(pReader, true); + + pScanInfo->sttKeyInfo.nextProcKey = asc ? pScanInfo->sttWindow.ekey + 1 : pScanInfo->sttWindow.skey - 1; + pScanInfo->sttKeyInfo.status = STT_FILE_NO_DATA; + pScanInfo->lastProcKey = asc ? 
pScanInfo->sttWindow.ekey : pScanInfo->sttWindow.skey; + pScanInfo->sttBlockReturned = true; + + pSttBlockReader->mergeTree.pIter = NULL; + + tsdbDebug("%p uid:%" PRId64 " return clean stt block as one, brange:%" PRId64 "-%" PRId64 " rows:%" PRId64 " %s", + pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, pReader->idStr); +} + +static void buildCleanBlockFromDataFiles(STsdbReader* pReader, STableBlockScanInfo* pScanInfo, + SFileDataBlockInfo* pBlockInfo, int32_t blockIndex) { + // whole block is required, return it directly + SReaderStatus* pStatus = &pReader->status; + SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; + bool asc = ASCENDING_TRAVERSE(pReader->info.order); + + pInfo->rows = pBlockInfo->numRow; + pInfo->id.uid = pScanInfo->uid; + pInfo->dataLoad = 0; + pInfo->version = pReader->info.verRange.maxVer; + pInfo->window = (STimeWindow){.skey = pBlockInfo->firstKey, .ekey = pBlockInfo->lastKey}; + setComposedBlockFlag(pReader, false); + setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->lastKey, pReader->info.order); + + // update the last key for the corresponding table + pScanInfo->lastProcKey = asc ? pInfo->window.ekey : pInfo->window.skey; + tsdbDebug("%p uid:%" PRIu64 " clean file block retrieved from file, global index:%d, " + "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", + pReader, pScanInfo->uid, blockIndex, pBlockInfo->tbBlockIdx, pBlockInfo->numRow, pBlockInfo->firstKey, + pBlockInfo->lastKey, pReader->idStr); +} + static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; SSttBlockReader* pSttBlockReader = pStatus->fileIter.pSttBlockReader; @@ -2680,28 +2732,7 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { // if only require the total rows, no need to load data from stt file if it is clean stt blocks if (pReader->info.execMode == READER_EXEC_ROWS && pScanInfo->cleanSttBlocks) { - bool asc = ASCENDING_TRAVERSE(pReader->info.order); - - SDataBlockInfo* pInfo = &pResBlock->info; - blockDataEnsureCapacity(pResBlock, pScanInfo->numOfRowsInStt); - - pInfo->rows = pScanInfo->numOfRowsInStt; - pInfo->id.uid = pScanInfo->uid; - pInfo->dataLoad = 1; - pInfo->window = pScanInfo->sttWindow; - - setComposedBlockFlag(pReader, true); - - pScanInfo->sttKeyInfo.nextProcKey = asc ? pScanInfo->sttWindow.ekey + 1 : pScanInfo->sttWindow.skey - 1; - pScanInfo->sttKeyInfo.status = STT_FILE_NO_DATA; - pScanInfo->lastProcKey = asc ? 
pScanInfo->sttWindow.ekey : pScanInfo->sttWindow.skey; - pScanInfo->sttBlockReturned = true; - - pSttBlockReader->mergeTree.pIter = NULL; - - tsdbDebug("%p uid:%" PRId64 " return clean stt block as one, brange:%" PRId64 "-%" PRId64 " rows:%" PRId64 " %s", - pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, - pResBlock->info.rows, pReader->idStr); + buildCleanBlockFromSttFiles(pReader, pScanInfo); return TSDB_CODE_SUCCESS; } @@ -2741,10 +2772,11 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { } } -static bool notOverlapWithSttFiles(SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pScanInfo, bool asc) { +// current active data block not overlap with the stt-files/stt-blocks +static bool notOverlapWithFiles(SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pScanInfo, bool asc) { ASSERT(pScanInfo->sttKeyInfo.status != STT_FILE_READER_UNINIT); - if (pScanInfo->sttKeyInfo.status == STT_FILE_NO_DATA) { + if ((!hasDataInSttBlock(pScanInfo)) || (pScanInfo->cleanSttBlocks == true)) { return true; } else { int64_t keyInStt = pScanInfo->sttKeyInfo.nextProcKey; @@ -2794,24 +2826,32 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { int64_t endKey = getBoarderKeyInFiles(pBlockInfo, pScanInfo, pReader->info.order); code = buildDataBlockFromBuf(pReader, pScanInfo, endKey); } else { - if (notOverlapWithSttFiles(pBlockInfo, pScanInfo, asc)) { - // whole block is required, return it directly - SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; - pInfo->rows = pBlockInfo->numRow; - pInfo->id.uid = pScanInfo->uid; - pInfo->dataLoad = 0; - pInfo->version = pReader->info.verRange.maxVer; - pInfo->window = (STimeWindow){.skey = pBlockInfo->firstKey, .ekey = pBlockInfo->lastKey}; - setComposedBlockFlag(pReader, false); - setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->lastKey, pReader->info.order); + if (notOverlapWithFiles(pBlockInfo, pScanInfo, asc)) { + int64_t keyInStt = pScanInfo->sttKeyInfo.nextProcKey; - // update the last key for the corresponding table - pScanInfo->lastProcKey = asc ? 
pInfo->window.ekey : pInfo->window.skey; - tsdbDebug("%p uid:%" PRIu64 - " clean file block retrieved from file, global index:%d, " - "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", - pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->numRow, - pBlockInfo->firstKey, pBlockInfo->lastKey, pReader->idStr); + if ((!hasDataInSttBlock(pScanInfo)) || (asc && pBlockInfo->lastKey < keyInStt) || + (!asc && pBlockInfo->firstKey > keyInStt)) { + if (pScanInfo->cleanSttBlocks && hasDataInSttBlock(pScanInfo)) { + if (asc) { // file block is located before the stt block + ASSERT(pScanInfo->sttWindow.skey > pBlockInfo->lastKey); + } else { // stt block is before the file block + ASSERT(pScanInfo->sttWindow.ekey < pBlockInfo->firstKey); + } + } + + buildCleanBlockFromDataFiles(pReader, pScanInfo, pBlockInfo, pBlockIter->index); + } else { // clean stt block + if (asc) { + ASSERT(pScanInfo->sttWindow.ekey < pBlockInfo->firstKey); + } else { + ASSERT(pScanInfo->sttWindow.skey > pBlockInfo->lastKey); + } + + // return the stt file block + ASSERT(pReader->info.execMode == READER_EXEC_ROWS && pSttBlockReader->mergeTree.pIter == NULL); + buildCleanBlockFromSttFiles(pReader, pScanInfo); + return TSDB_CODE_SUCCESS; + } } else { SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); @@ -2822,7 +2862,6 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { int64_t st = taosGetTimestampUs(); // let's load data from stt files, make sure clear the cleanStt block flag before load the data from stt files - pScanInfo->cleanSttBlocks = false; initSttBlockReader(pSttBlockReader, pScanInfo, pReader); // no data in stt block, no need to proceed. @@ -2840,8 +2879,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { // data in stt now overlaps with current active file data block, need to composed with file data block. 
int64_t lastKeyInStt = getCurrentKeyInSttBlock(pSttBlockReader); - if ((lastKeyInStt >= pBlockInfo->firstKey && asc) || - (lastKeyInStt <= pBlockInfo->lastKey && (!asc))) { + if ((lastKeyInStt >= pBlockInfo->firstKey && asc) || (lastKeyInStt <= pBlockInfo->lastKey && (!asc))) { tsdbDebug("%p lastKeyInStt:%" PRId64 ", overlap with file block, brange:%" PRId64 "-%" PRId64 " %s", pReader, lastKeyInStt, pBlockInfo->firstKey, pBlockInfo->lastKey, pReader->idStr); break; From 0029c348c5e049ce90d8ef23e8fe644e8e7dfb6e Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 14:56:24 +0800 Subject: [PATCH 09/15] fix: float and double tsz check --- tests/army/community/cluster/snapshot.json | 4 ++-- tests/army/community/cluster/snapshot.py | 17 +++++++++++++++++ utils/TSZ/sz/src/sz_double.c | 8 +++++--- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/tests/army/community/cluster/snapshot.json b/tests/army/community/cluster/snapshot.json index d4f6f00d37..f664278a72 100644 --- a/tests/army/community/cluster/snapshot.json +++ b/tests/army/community/cluster/snapshot.json @@ -35,8 +35,8 @@ "start_timestamp":"now-12d", "columns": [ { "type": "bool", "name": "bc"}, - { "type": "float", "name": "fc" }, - { "type": "double", "name": "dc"}, + { "type": "float", "name": "fc", "min": 100, "min": 100}, + { "type": "double", "name": "dc", "min": 200, "min": 200}, { "type": "tinyint", "name": "ti"}, { "type": "smallint", "name": "si" }, { "type": "int", "name": "ic" }, diff --git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py index 3f9a497f16..6648efa027 100644 --- a/tests/army/community/cluster/snapshot.py +++ b/tests/army/community/cluster/snapshot.py @@ -52,6 +52,16 @@ class TDTestCase(TBase): sql = f"create table {self.db}.ta(ts timestamp, age int) tags(area int)" tdSql.execute(sql) + def checkFloatDouble(self): + sql = f"select count(*) from {self.db}.{self.stb} where fc!=100" + tdSql.checkFirstValue(sql, 0) + sql = f"select count(*) from {self.db}.{self.stb} where dc!=200" + tdSql.checkFirstValue(sql, 0) + sql = f"select avg(fc) from {self.db}.{self.stb}" + tdSql.checkFirstValue(sql, 100) + sql = f"select avg(dc) from {self.db}.{self.stb}" + tdSql.checkFirstValue(sql, 200) + def doAction(self): tdLog.info(f"do action.") self.flushDb() @@ -89,6 +99,9 @@ class TDTestCase(TBase): # check insert data correct self.checkInsertCorrect() + # check float double value ok + tdSql.checkFloatDouble() + # save self.snapshotAgg() @@ -101,6 +114,10 @@ class TDTestCase(TBase): # check insert correct again self.checkInsertCorrect() + # check float double value ok + tdSql.checkFloatDouble() + + tdLog.success(f"{__file__} successfully executed") diff --git a/utils/TSZ/sz/src/sz_double.c b/utils/TSZ/sz/src/sz_double.c index 1adfdf3b56..0510fc612d 100644 --- a/utils/TSZ/sz/src/sz_double.c +++ b/utils/TSZ/sz/src/sz_double.c @@ -385,9 +385,11 @@ unsigned int optimize_intervals_double_1D_opt(double *oriData, size_t dataLength totalSampleSize++; pred_value = data_pos[-1]; pred_err = fabs(pred_value - *data_pos); - radiusIndex = (unsigned long)((pred_err/realPrecision+1)/2); - if(radiusIndex>=confparams_cpr->maxRangeRadius) - radiusIndex = confparams_cpr->maxRangeRadius - 1; + double dbri = (unsigned long)((pred_err/realPrecision+1)/2); + if(dbri >= (double)confparams_cpr->maxRangeRadius) + radiusIndex = confparams_cpr->maxRangeRadius - 1; + else + radiusIndex = dbri; intervals[radiusIndex]++; data_pos += confparams_cpr->sampleDistance; From 
d51e651c17ef19b74b9325b3ebf95164e10601e3 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 15:00:44 +0800 Subject: [PATCH 10/15] fix: case right --- tests/army/community/cluster/snapshot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py index 6648efa027..6fd2218344 100644 --- a/tests/army/community/cluster/snapshot.py +++ b/tests/army/community/cluster/snapshot.py @@ -100,7 +100,7 @@ class TDTestCase(TBase): self.checkInsertCorrect() # check float double value ok - tdSql.checkFloatDouble() + self.checkFloatDouble() # save self.snapshotAgg() @@ -115,7 +115,7 @@ class TDTestCase(TBase): self.checkInsertCorrect() # check float double value ok - tdSql.checkFloatDouble() + self.checkFloatDouble() tdLog.success(f"{__file__} successfully executed") From d63448be72b9ebeadc8876e33cc07371b62d2e82 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Sun, 4 Feb 2024 15:44:49 +0800 Subject: [PATCH 11/15] reset group id for event window --- source/libs/executor/src/eventwindowoperator.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c index 3cfd0ab582..2cba6e3241 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -220,7 +220,6 @@ static int32_t setSingleOutputTupleBufv1(SResultRowInfo* pResultRowInfo, STimeWi (*pResult)->win = *win; - clearResultRowInitFlag(pExprSup->pCtx, pExprSup->numOfExprs); setResultRowInitCtx(*pResult, pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); return TSDB_CODE_SUCCESS; } @@ -262,6 +261,7 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p } else if (pInfo->groupId != gid) { // this is a new group, reset the info pInfo->inWindow = false; + pInfo->groupId = gid; } SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock}; @@ -319,6 +319,9 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p doKeepNewWindowStartInfo(pRowSup, tsList, rowIndex, gid); pInfo->inWindow = true; startIndex = rowIndex; + if (pInfo->pRow != NULL) { + clearResultRowInitFlag(pSup->pCtx, pSup->numOfExprs); + } break; } } From c5f5aca4ce19f3bb0128b46a20e1d0f69303a7dd Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 17:35:34 +0800 Subject: [PATCH 12/15] fix: fix case passed --- tests/army/community/cluster/snapshot.json | 4 ++-- tests/army/community/cluster/snapshot.py | 8 +++++--- tests/army/community/storage/oneStageComp.json | 4 ++-- tests/army/community/storage/oneStageComp.py | 11 +++++++++++ 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/tests/army/community/cluster/snapshot.json b/tests/army/community/cluster/snapshot.json index f664278a72..4855c23260 100644 --- a/tests/army/community/cluster/snapshot.json +++ b/tests/army/community/cluster/snapshot.json @@ -35,8 +35,8 @@ "start_timestamp":"now-12d", "columns": [ { "type": "bool", "name": "bc"}, - { "type": "float", "name": "fc", "min": 100, "min": 100}, - { "type": "double", "name": "dc", "min": 200, "min": 200}, + { "type": "float", "name": "fc", "min": 100, "max": 100}, + { "type": "double", "name": "dc", "min": 200, "max": 200}, { "type": "tinyint", "name": "ti"}, { "type": "smallint", "name": "si" }, { "type": "int", "name": "ic" }, diff 
--git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py index 6fd2218344..b4c4d3c4c8 100644 --- a/tests/army/community/cluster/snapshot.py +++ b/tests/army/community/cluster/snapshot.py @@ -53,10 +53,12 @@ class TDTestCase(TBase): tdSql.execute(sql) def checkFloatDouble(self): - sql = f"select count(*) from {self.db}.{self.stb} where fc!=100" - tdSql.checkFirstValue(sql, 0) + sql = f"select * from {self.db}.{self.stb} where fc!=100" + tdSql.query(sql) + tdSql.checkRows(0) sql = f"select count(*) from {self.db}.{self.stb} where dc!=200" - tdSql.checkFirstValue(sql, 0) + tdSql.query(sql) + tdSql.checkRows(0) sql = f"select avg(fc) from {self.db}.{self.stb}" tdSql.checkFirstValue(sql, 100) sql = f"select avg(dc) from {self.db}.{self.stb}" diff --git a/tests/army/community/storage/oneStageComp.json b/tests/army/community/storage/oneStageComp.json index 12fa51db83..f64fda3824 100644 --- a/tests/army/community/storage/oneStageComp.json +++ b/tests/army/community/storage/oneStageComp.json @@ -24,7 +24,7 @@ "stt_trigger": 1, "wal_level": 2, "WAL_FSYNC_PERIOD": 3300, - "cachemode": "last_value", + "cachemodel": "'last_value'", "TABLE_PREFIX":1, "comp": 1 }, @@ -37,7 +37,7 @@ "childtable_prefix": "d", "insert_mode": "taosc", "timestamp_step": 1000, - "start_timestamp":"2023-01-01 00:00:00", + "start_timestamp":"now-360d", "columns": [ { "type": "bool", "name": "bc","max": 1,"min": 1}, { "type": "float", "name": "fc" ,"max": 101,"min": 101}, diff --git a/tests/army/community/storage/oneStageComp.py b/tests/army/community/storage/oneStageComp.py index f3718bc716..35a7717449 100644 --- a/tests/army/community/storage/oneStageComp.py +++ b/tests/army/community/storage/oneStageComp.py @@ -53,26 +53,37 @@ class TDTestCase(TBase): # check all columns correct cnt = self.insert_rows * self.childtable_count sql = "select * from stb where bc!=1" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where fc=101" + tdSql.query(sql) tdSql.checkRows(cnt) sql = "select * from stb where dc!=102" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where ti!=103" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where si!=104" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where ic!=105" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where b!i=106" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where uti!=107" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where usi!=108" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where ui!=109" + tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where ubi!=110" + tdSql.query(sql) tdSql.checkRows(0) def insertNull(self): From cb4cb936d32a09ef8390dce3c84dbb27c825e447 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 17:48:24 +0800 Subject: [PATCH 13/15] fix: case passed --- tests/army/community/storage/oneStageComp.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/army/community/storage/oneStageComp.py b/tests/army/community/storage/oneStageComp.py index 35a7717449..8860220673 100644 --- a/tests/army/community/storage/oneStageComp.py +++ b/tests/army/community/storage/oneStageComp.py @@ -70,7 +70,7 @@ class TDTestCase(TBase): sql = "select * from stb where ic!=105" tdSql.query(sql) tdSql.checkRows(0) - sql = "select * from stb where b!i=106" + sql = "select * from stb where bi!=106" tdSql.query(sql) tdSql.checkRows(0) sql = "select * from stb where uti!=107" @@ 
-97,10 +97,16 @@ class TDTestCase(TBase): # check all columns correct cnt = self.insert_rows * self.childtable_count sql = "select * from stb where bc!=1" + tdSql.query(sql) + tdSql.checkRows(0) + sql = "select * from stb where bc is null" + tdSql.query(sql) tdSql.checkRows(6) sql = "select * from stb where bc=1" + tdSql.query(sql) tdSql.checkRows(cnt) sql = "select * from stb where usi!=108" + tdSql.query(sql) tdSql.checkRows(6) # run From 8360d6a12b126b4a82152f50898ed288fc87f5c5 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 4 Feb 2024 18:01:27 +0800 Subject: [PATCH 14/15] fix: check null value --- tests/army/community/storage/oneStageComp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/community/storage/oneStageComp.py b/tests/army/community/storage/oneStageComp.py index 8860220673..9a2c7cfcd6 100644 --- a/tests/army/community/storage/oneStageComp.py +++ b/tests/army/community/storage/oneStageComp.py @@ -105,7 +105,7 @@ class TDTestCase(TBase): sql = "select * from stb where bc=1" tdSql.query(sql) tdSql.checkRows(cnt) - sql = "select * from stb where usi!=108" + sql = "select * from stb where usi is null" tdSql.query(sql) tdSql.checkRows(6) From b05141d3983facbf496c521a778e900100bc6cbf Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Sun, 4 Feb 2024 18:12:04 +0800 Subject: [PATCH 15/15] fix app name on windows --- source/os/src/osSemaphore.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c index dda4b14901..7d1cc746ff 100644 --- a/source/os/src/osSemaphore.c +++ b/source/os/src/osSemaphore.c @@ -56,6 +56,8 @@ int32_t taosGetAppName(char* name, int32_t* len) { char* end = strrchr(filepath, TD_DIRSEP[0]); if (end == NULL) { end = filepath; + } else { + end += 1; } tstrncpy(name, end, TSDB_APP_NAME_LEN);
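
The PATCH 15/15 change skips the directory separator that strrchr() points at before copying the executable name; without the "end += 1", a Windows path such as "C:\TDengine\taosd.exe" would produce an app name that begins with "\". A minimal standalone sketch of the same basename extraction follows, using plain strncpy, a literal '\\' in place of TD_DIRSEP[0], and an illustrative 64-byte buffer instead of TSDB_APP_NAME_LEN; it is an approximation of the fixed logic, not the project's taosGetAppName() itself.

#include <stdio.h>
#include <string.h>

/* Copy the executable name (basename) out of a full path.
 * strrchr() returns a pointer to the last separator itself, so it is
 * advanced by one; otherwise the copied name would start with '\\'. */
static void app_name_from_path(const char *filepath, char *name, size_t len) {
    const char *end = strrchr(filepath, '\\');   /* stands in for TD_DIRSEP[0] on Windows */
    if (end == NULL) {
        end = filepath;   /* no separator: the whole string is the name */
    } else {
        end += 1;         /* skip the separator, mirroring the fix above */
    }
    strncpy(name, end, len - 1);
    name[len - 1] = '\0';
}

int main(void) {
    char name[64];
    app_name_from_path("C:\\TDengine\\taosd.exe", name, sizeof(name));
    printf("%s\n", name); /* prints "taosd.exe" */
    return 0;
}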