merge szhou/fix/udf
commit bf18385e00
@@ -22,6 +22,7 @@ mac/
 .mypy_cache
 *.tmp
 *.swp
+*.swo
 *.orig
 src/connector/nodejs/node_modules/
 src/connector/nodejs/out/
@@ -154,7 +154,6 @@ typedef struct SQueryTableDataCond {
 int32_t numOfCols;
 SColumnInfo* colList;
 int32_t type; // data block load type:
-// int32_t numOfTWindows;
 STimeWindow twindows;
 int64_t startVersion;
 int64_t endVersion;
@@ -312,7 +312,7 @@ static int32_t mndSaveQueryList(SConnObj *pConn, SQueryHbReqBasic *pBasic) {
 pConn->numOfQueries = pBasic->queryDesc ? taosArrayGetSize(pBasic->queryDesc) : 0;
 pBasic->queryDesc = NULL;

-mDebug("queries updated in conn %d, num:%d", pConn->id, pConn->numOfQueries);
+mDebug("queries updated in conn %u, num:%d", pConn->id, pConn->numOfQueries);

 taosWUnLockLatch(&pConn->queryLock);

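The only functional change in the hunk above is the format specifier: pConn->id is an unsigned connection id, so %u is the matching conversion, while %d would misprint large ids as negative numbers. A standalone illustration of the difference (plain C, not TDengine code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint32_t connId = 3000000000u;  // larger than INT32_MAX

      // Mismatched specifier: the value is reinterpreted as signed and prints negative.
      printf("with %%d: %d\n", (int32_t)connId);

      // Matching specifiers for an unsigned 32-bit value.
      printf("with %%u: %u\n", connId);
      printf("with PRIu32: %" PRIu32 "\n", connId);
      return 0;
    }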
@@ -118,9 +118,8 @@ int32_t metaTbCursorNext(SMTbCursor *pTbCur);
 // typedef struct STsdb STsdb;
 typedef struct STsdbReader STsdbReader;

-#define BLOCK_LOAD_OFFSET_ORDER 1
-#define BLOCK_LOAD_TABLESEQ_ORDER 2
-#define BLOCK_LOAD_EXTERN_ORDER 3
+#define TIMEWINDOW_RANGE_CONTAINED 1
+#define TIMEWINDOW_RANGE_EXTERNAL 2

 #define LASTROW_RETRIEVE_TYPE_ALL 0x1
 #define LASTROW_RETRIEVE_TYPE_SINGLE 0x2
@@ -16,6 +16,12 @@
 #include "tsdb.h"
 #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)

+typedef enum {
+EXTERNAL_ROWS_PREV = 0x1,
+EXTERNAL_ROWS_MAIN = 0x2,
+EXTERNAL_ROWS_NEXT = 0x3,
+} EContentData;
+
 typedef struct {
 STbDataIter* iter;
 int32_t index;
@@ -70,9 +76,9 @@ typedef struct SFilesetIter {
 } SFilesetIter;

 typedef struct SFileDataBlockInfo {
-int32_t
-tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it
+// index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it
 uint64_t uid;
+int32_t tbBlockIdx;
 } SFileDataBlockInfo;

 typedef struct SDataBlockIter {
@@ -99,7 +105,6 @@ typedef struct SReaderStatus {
 SHashObj* pTableMap; // SHash<STableBlockScanInfo>
 STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
 SFileBlockDumpInfo fBlockDumpInfo;
-
 SDFileSet* pCurrentFileset; // current opened file set
 SBlockData fileBlockData;
 SFilesetIter fileIter;
@@ -119,11 +124,13 @@ struct STsdbReader {
 int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
 SBlockLoadSuppInfo suppInfo;
 STsdbReadSnap* pReadSnap;

 SIOCostSummary cost;
 STSchema* pSchema;
 SDataFReader* pFileReader;
 SVersionRange verRange;
+
+int32_t step;
+STsdbReader* innerReader[2];
 };

 static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
@@ -200,6 +207,9 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
 pTsdbReader->idStr);
 }

+tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables, (sizeof(STableBlockScanInfo)*numOfTables)/1024.0,
+pTsdbReader->idStr);
+
 return pTableMap;
 }

@@ -328,7 +338,7 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
 continue;
 }

-tsdbDebug("%p file found fid:%d for qrange:%" PRId64 "-%" PRId64 ", ignore, %s", pReader, fid, pReader->window.skey,
+tsdbDebug("%p file found fid:%d for qrange:%" PRId64 "-%" PRId64 ", %s", pReader, fid, pReader->window.skey,
 pReader->window.ekey, pReader->idStr);
 return true;
 }
@@ -378,7 +388,7 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity)
 return pResBlock;
 }

-static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, const char* idstr) {
+static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity, const char* idstr) {
 int32_t code = 0;
 int8_t level = 0;
 STsdbReader* pReader = (STsdbReader*)taosMemoryCalloc(1, sizeof(*pReader));
@@ -392,7 +402,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
 pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
 pReader->suid = pCond->suid;
 pReader->order = pCond->order;
-pReader->capacity = 4096;
+pReader->capacity = capacity;
 pReader->idStr = (idstr != NULL) ? strdup(idstr) : NULL;
 pReader->verRange = getQueryVerRange(pVnode, pCond, level);
 pReader->type = pCond->type;
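With the two hunks above, tsdbReaderCreate takes the result-block capacity from its caller instead of hard-coding 4096, which lets the main reader keep a large buffer while the small inner readers created for external rows request only what they need. A minimal sketch of the same pattern, with hypothetical names rather than the real TDengine API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Reader {
      int32_t  capacity;   // number of rows the result buffer can hold
      int64_t *rowBuf;     // result buffer sized from 'capacity'
    } Reader;

    // The capacity is a parameter instead of a constant, so different call sites
    // can size the buffer for their access pattern.
    static Reader *readerCreate(int32_t capacity) {
      Reader *r = calloc(1, sizeof(Reader));
      if (r == NULL) return NULL;
      r->capacity = capacity;
      r->rowBuf = calloc((size_t)capacity, sizeof(int64_t));
      if (r->rowBuf == NULL) { free(r); return NULL; }
      return r;
    }

    static void readerClose(Reader *r) {
      if (r) { free(r->rowBuf); free(r); }
    }

    int main(void) {
      Reader *mainReader = readerCreate(4096);  // bulk scan
      Reader *prevReader = readerCreate(1);     // only needs the single "previous" row
      printf("main:%d prev:%d\n", mainReader->capacity, prevReader->capacity);
      readerClose(mainReader);
      readerClose(prevReader);
      return 0;
    }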
@@ -483,95 +493,6 @@ _end:
 // return res;
 // }

-// static TSKEY extractFirstTraverseKey(STableBlockScanInfo* pCheckInfo, int32_t order, int32_t update, TDRowVerT
-// maxVer) {
-// TSDBROW row = {0};
-// STSRow *rmem = NULL, *rimem = NULL;
-
-// if (pCheckInfo->iter) {
-// if (tsdbTbDataIterGet(pCheckInfo->iter, &row)) {
-// rmem = row.pTSRow;
-// }
-// }
-
-// if (pCheckInfo->iiter) {
-// if (tsdbTbDataIterGet(pCheckInfo->iiter, &row)) {
-// rimem = row.pTSRow;
-// }
-// }
-
-// if (rmem == NULL && rimem == NULL) {
-// return TSKEY_INITIAL_VAL;
-// }
-
-// if (rmem != NULL && rimem == NULL) {
-// pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
-// return TD_ROW_KEY(rmem);
-// }
-
-// if (rmem == NULL && rimem != NULL) {
-// pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
-// return TD_ROW_KEY(rimem);
-// }
-
-// TSKEY r1 = TD_ROW_KEY(rmem);
-// TSKEY r2 = TD_ROW_KEY(rimem);
-
-// if (r1 == r2) {
-// if (TD_SUPPORT_UPDATE(update)) {
-// pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH;
-// } else {
-// pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
-// tsdbTbDataIterNext(pCheckInfo->iter);
-// }
-// return r1;
-// } else if (r1 < r2 && ASCENDING_TRAVERSE(order)) {
-// pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
-// return r1;
-// } else {
-// pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
-// return r2;
-// }
-// }
-
-// static bool moveToNextRowInMem(STableBlockScanInfo* pCheckInfo) {
-// bool hasNext = false;
-// if (pCheckInfo->chosen == CHECKINFO_CHOSEN_MEM) {
-// if (pCheckInfo->iter != NULL) {
-// hasNext = tsdbTbDataIterNext(pCheckInfo->iter);
-// }
-
-// if (hasNext) {
-// return hasNext;
-// }
-
-// if (pCheckInfo->iiter != NULL) {
-// return tsdbTbDataIterGet(pCheckInfo->iiter, NULL);
-// }
-// } else if (pCheckInfo->chosen == CHECKINFO_CHOSEN_IMEM) {
-// if (pCheckInfo->iiter != NULL) {
-// hasNext = tsdbTbDataIterNext(pCheckInfo->iiter);
-// }
-
-// if (hasNext) {
-// return hasNext;
-// }
-
-// if (pCheckInfo->iter != NULL) {
-// return tsdbTbDataIterGet(pCheckInfo->iter, NULL);
-// }
-// } else {
-// if (pCheckInfo->iter != NULL) {
-// hasNext = tsdbTbDataIterNext(pCheckInfo->iter);
-// }
-// if (pCheckInfo->iiter != NULL) {
-// hasNext = tsdbTbDataIterNext(pCheckInfo->iiter) || hasNext;
-// }
-// }
-
-// return hasNext;
-// }
-
 // static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
 // int32_t firstSlot = 0;
 // int32_t lastSlot = numOfBlocks - 1;
@@ -602,18 +523,22 @@ _end:
 static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) {
 SArray* aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));

+int64_t st = taosGetTimestampUs();
 int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx, NULL);
 if (code != TSDB_CODE_SUCCESS) {
 goto _end;
 }

-if (taosArrayGetSize(aBlockIdx) == 0) {
+size_t num = taosArrayGetSize(aBlockIdx);
+if (num == 0) {
 taosArrayClear(aBlockIdx);
 return TSDB_CODE_SUCCESS;
 }

-SBlockIdx* pBlockIdx;
-for (int32_t i = 0; i < taosArrayGetSize(aBlockIdx); ++i) {
+int64_t et1 = taosGetTimestampUs();
+
+SBlockIdx* pBlockIdx = NULL;
+for (int32_t i = 0; i < num; ++i) {
 pBlockIdx = (SBlockIdx*)taosArrayGet(aBlockIdx, i);

 // uid check
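This and several later hunks bracket the load steps with microsecond timestamps and report the elapsed time in the debug log. A self-contained sketch of that measurement pattern; taosGetTimestampUs is TDengine's own helper, so the sketch substitutes clock_gettime:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    // Microsecond wall-clock timestamp, analogous in spirit to a *GetTimestampUs() helper.
    static int64_t nowUs(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    int main(void) {
      int64_t st = nowUs();

      // ... the work being measured, e.g. reading a block index ...
      volatile int64_t sum = 0;
      for (int i = 0; i < 1000000; ++i) sum += i;

      int64_t et = nowUs();
      printf("load step completed, elapsed time:%.2f ms\n", (et - st) / 1000.0);
      return 0;
    }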
@@ -627,17 +552,6 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
 continue;
 }

-// todo: not valid info in bockIndex
-// time range check
-// if (pBlockIdx->minKey > pReader->window.ekey || pBlockIdx->maxKey < pReader->window.skey) {
-// continue;
-// }
-
-// version check
-// if (pBlockIdx->minVersion > pReader->verRange.maxVer || pBlockIdx->maxVersion < pReader->verRange.minVer) {
-// continue;
-// }
-
 STableBlockScanInfo* pScanInfo = p;
 if (pScanInfo->pBlockList == NULL) {
 pScanInfo->pBlockList = taosArrayInit(16, sizeof(SBlock));
@@ -647,6 +561,9 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
 taosArrayPush(pIndexList, pBlockIdx);
 }

+int64_t et2 = taosGetTimestampUs();
+tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%d bytes %s",
+(int32_t)num, (et1 - st)/1000.0, (et2-et1)/1000.0, num * sizeof(SBlockIdx), pReader->idStr);
 _end:
 taosArrayDestroy(aBlockIdx);
 return code;
@@ -655,9 +572,11 @@ _end:
 static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_t* numOfValidTables,
 int32_t* numOfBlocks) {
 size_t numOfTables = taosArrayGetSize(pIndexList);
-
 *numOfValidTables = 0;

+int64_t st = taosGetTimestampUs();
+size_t size = 0;
+
 STableBlockScanInfo* px = NULL;
 while (1) {
 px = taosHashIterate(pReader->status.pTableMap, px);
@@ -675,6 +594,8 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
 tMapDataReset(&mapData);
 tsdbReadBlock(pReader->pFileReader, pBlockIdx, &mapData, NULL);

+size += mapData.nData;
+
 STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(int64_t));
 for (int32_t j = 0; j < mapData.nItem; ++j) {
 SBlock block = {0};
@@ -706,6 +627,10 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
 }
 }

+int64_t et = taosGetTimestampUs();
+tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, size:%.2f Kb, elapsed time:%.2f ms %s",
+numOfTables, *numOfBlocks, *numOfValidTables, size/1000.0, (et-st)/1000.0, pReader->idStr);
+
 return TSDB_CODE_SUCCESS;
 }

@@ -816,7 +741,6 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
 return TSDB_CODE_SUCCESS;
 }

-// todo consider the output buffer size
 static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter,
 STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
 int64_t st = taosGetTimestampUs();
@@ -853,346 +777,6 @@ _error:
 return code;
 }

-// static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
-// int firstPos, lastPos, midPos = -1;
-// int numOfRows;
-// TSKEY* keyList;
-
-// assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
-
-// if (num <= 0) return -1;
-
-// keyList = (TSKEY*)pValue;
-// firstPos = 0;
-// lastPos = num - 1;
-
-// if (order == TSDB_ORDER_DESC) {
-// // find the first position which is smaller than the key
-// while (1) {
-// if (key >= keyList[lastPos]) return lastPos;
-// if (key == keyList[firstPos]) return firstPos;
-// if (key < keyList[firstPos]) return firstPos - 1;
-
-// numOfRows = lastPos - firstPos + 1;
-// midPos = (numOfRows >> 1) + firstPos;
-
-// if (key < keyList[midPos]) {
-// lastPos = midPos - 1;
-// } else if (key > keyList[midPos]) {
-// firstPos = midPos + 1;
-// } else {
-// break;
-// }
-// }
-
-// } else {
-// // find the first position which is bigger than the key
-// while (1) {
-// if (key <= keyList[firstPos]) return firstPos;
-// if (key == keyList[lastPos]) return lastPos;
-
-// if (key > keyList[lastPos]) {
-// lastPos = lastPos + 1;
-// if (lastPos >= num)
-// return -1;
-// else
-// return lastPos;
-// }
-
-// numOfRows = lastPos - firstPos + 1;
-// midPos = (numOfRows >> 1) + firstPos;
-
-// if (key < keyList[midPos]) {
-// lastPos = midPos - 1;
-// } else if (key > keyList[midPos]) {
-// firstPos = midPos + 1;
-// } else {
-// break;
-// }
-// }
-// }
-
-// return midPos;
-// }
-
-// static void doCheckGeneratedBlockRange(STsdbReader* pTsdbReadHandle) {
-// SQueryFilePos* cur = &pTsdbReadHandle->cur;
-
-// if (cur->rows > 0) {
-// if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
-// assert(cur->win.skey >= pTsdbReadHandle->window.skey && cur->win.ekey <= pTsdbReadHandle->window.ekey);
-// } else {
-// assert(cur->win.skey >= pTsdbReadHandle->window.ekey && cur->win.ekey <= pTsdbReadHandle->window.skey);
-// }
-
-// SColumnInfoData* pColInfoData = taosArrayGet(pTsdbReadHandle->pColumns, 0);
-// assert(cur->win.skey == ((TSKEY*)pColInfoData->pData)[0] &&
-// cur->win.ekey == ((TSKEY*)pColInfoData->pData)[cur->rows - 1]);
-// } else {
-// cur->win = pTsdbReadHandle->window;
-
-// int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
-// cur->lastKey = pTsdbReadHandle->window.ekey + step;
-// }
-// }
-
-// static void copyAllRemainRowsFromFileBlock(STsdbReader* pTsdbReadHandle, STableBlockScanInfo* pCheckInfo,
-// SDataBlockInfo* pBlockInfo, int32_t endPos) {
-// SQueryFilePos* cur = &pTsdbReadHandle->cur;
-
-// SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
-// TSKEY* tsArray = pCols->cols[0].pData;
-
-// bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
-
-// int32_t step = ascScan ? 1 : -1;
-
-// int32_t start = cur->pos;
-// int32_t end = endPos;
-
-// if (!ascScan) {
-// TSWAP(start, end);
-// }
-
-// assert(pTsdbReadHandle->outputCapacity >= (end - start + 1));
-// int32_t numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, 0, start, end);
-
-// // the time window should always be ascending order: skey <= ekey
-// cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]};
-// cur->mixBlock = (numOfRows != pBlockInfo->rows);
-// cur->lastKey = tsArray[endPos] + step;
-// cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0));
-
-// // The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
-// int32_t pos = endPos + step;
-// updateInfoAfterMerge(pTsdbReadHandle, pCheckInfo, numOfRows, pos);
-// doCheckGeneratedBlockRange(pTsdbReadHandle);
-
-// tsdbDebug("%p uid:%" PRIu64 ", data block created, mixblock:%d, brange:%" PRIu64 "-%" PRIu64 " rows:%d, %s",
-// pTsdbReadHandle, pCheckInfo->tableId, cur->mixBlock, cur->win.skey, cur->win.ekey, cur->rows,
-// pTsdbReadHandle->idStr);
-// }
-
-// // only return the qualified data to client in terms of query time window, data rows in the same block but do not
-// // be included in the query time window will be discarded
-// static void doMergeTwoLevelData(STsdbReader* pTsdbReadHandle, STableBlockScanInfo* pCheckInfo, SBlock* pBlock) {
-// SQueryFilePos* cur = &pTsdbReadHandle->cur;
-// SDataBlockInfo blockInfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
-// STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb);
-
-// initTableMemIterator(pTsdbReadHandle, pCheckInfo);
-
-// SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
-// assert(pCols->cols[0].type == TSDB_DATA_TYPE_TIMESTAMP && pCols->cols[0].colId == PRIMARYKEY_TIMESTAMP_COL_ID &&
-// cur->pos >= 0 && cur->pos < pBlock->numOfRows);
-// // Even Multi-Version supported, the records with duplicated TSKEY would be merged inside of tsdbLoadData
-// interface. TSKEY* tsArray = pCols->cols[0].pData; assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] ==
-// pBlock->minKey.ts &&
-// tsArray[pBlock->numOfRows - 1] == pBlock->maxKey.ts);
-
-// bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
-// int32_t step = ascScan ? 1 : -1;
-
-// // for search the endPos, so the order needs to reverse
-// int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
-
-// int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pTsdbReadHandle));
-// int32_t endPos = getEndPosInDataBlock(pTsdbReadHandle, &blockInfo);
-
-// STimeWindow* pWin = &blockInfo.window;
-// tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64
-// " rows:%d, start:%d, end:%d, %s",
-// pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos,
-// pTsdbReadHandle->idStr);
-
-// // compared with the data from in-memory buffer, to generate the correct timestamp array list
-// int32_t numOfRows = 0;
-// int32_t curRow = 0;
-
-// int16_t rv1 = -1;
-// int16_t rv2 = -1;
-// STSchema* pSchema1 = NULL;
-// STSchema* pSchema2 = NULL;
-
-// int32_t pos = cur->pos;
-// cur->win = TSWINDOW_INITIALIZER;
-// bool adjustPos = false;
-
-// // no data in buffer, load data from file directly
-// if (pCheckInfo->iiter == NULL && pCheckInfo->iter == NULL) {
-// copyAllRemainRowsFromFileBlock(pTsdbReadHandle, pCheckInfo, &blockInfo, endPos);
-// return;
-// } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
-// SSkipListNode* node = NULL;
-// TSKEY lastKeyAppend = TSKEY_INITIAL_VAL;
-
-// do {
-// STSRow* row2 = NULL;
-// STSRow* row1 = getSRowInTableMem(pCheckInfo, pTsdbReadHandle->order, pCfg->update, &row2, TD_VER_MAX);
-// if (row1 == NULL) {
-// break;
-// }
-
-// TSKEY key = TD_ROW_KEY(row1);
-// if ((key > pTsdbReadHandle->window.ekey && ascScan) || (key < pTsdbReadHandle->window.ekey && !ascScan)) {
-// break;
-// }
-
-// if (adjustPos) {
-// if (key == lastKeyAppend) {
-// pos -= step;
-// }
-// adjustPos = false;
-// }
-
-// if (((pos > endPos || tsArray[pos] > pTsdbReadHandle->window.ekey) && ascScan) ||
-// ((pos < endPos || tsArray[pos] < pTsdbReadHandle->window.ekey) && !ascScan)) {
-// break;
-// }
-
-// if ((key < tsArray[pos] && ascScan) || (key > tsArray[pos] && !ascScan)) {
-// if (rv1 != TD_ROW_SVER(row1)) {
-// // pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
-// rv1 = TD_ROW_SVER(row1);
-// }
-// if (row2 && rv2 != TD_ROW_SVER(row2)) {
-// // pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
-// rv2 = TD_ROW_SVER(row2);
-// }
-
-// numOfRows +=
-// mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
-// pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
-// if (cur->win.skey == TSKEY_INITIAL_VAL) {
-// cur->win.skey = key;
-// }
-
-// cur->win.ekey = key;
-// cur->lastKey = key + step;
-// cur->mixBlock = true;
-// moveToNextRowInMem(pCheckInfo);
-// } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
-// if (TD_SUPPORT_UPDATE(pCfg->update)) {
-// if (lastKeyAppend != key) {
-// if (lastKeyAppend != TSKEY_INITIAL_VAL) {
-// ++curRow;
-// }
-// lastKeyAppend = key;
-// }
-// // load data from file firstly
-// numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
-
-// if (rv1 != TD_ROW_SVER(row1)) {
-// rv1 = TD_ROW_SVER(row1);
-// }
-// if (row2 && rv2 != TD_ROW_SVER(row2)) {
-// rv2 = TD_ROW_SVER(row2);
-// }
-
-// // still assign data into current row
-// numOfRows +=
-// mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
-// pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
-
-// if (cur->win.skey == TSKEY_INITIAL_VAL) {
-// cur->win.skey = key;
-// }
-
-// cur->win.ekey = key;
-// cur->lastKey = key + step;
-// cur->mixBlock = true;
-
-// moveToNextRowInMem(pCheckInfo);
-
-// pos += step;
-// adjustPos = true;
-// } else {
-// // discard the memory record
-// moveToNextRowInMem(pCheckInfo);
-// }
-// } else if ((key > tsArray[pos] && ascScan) || (key < tsArray[pos] && !ascScan)) {
-// if (cur->win.skey == TSKEY_INITIAL_VAL) {
-// cur->win.skey = tsArray[pos];
-// }
-
-// int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
-// assert(end != -1);
-
-// if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
-// #if 0
-// if (pCfg->update == TD_ROW_DISCARD_UPDATE) {
-// moveToNextRowInMem(pCheckInfo);
-// } else {
-// end -= step;
-// }
-// #endif
-// if (!TD_SUPPORT_UPDATE(pCfg->update)) {
-// moveToNextRowInMem(pCheckInfo);
-// } else {
-// end -= step;
-// }
-// }
-
-// int32_t qstart = 0, qend = 0;
-// getQualifiedRowsPos(pTsdbReadHandle, pos, end, numOfRows, &qstart, &qend);
-
-// if ((lastKeyAppend != TSKEY_INITIAL_VAL) && (lastKeyAppend != (ascScan ? tsArray[qstart] : tsArray[qend]))) {
-// ++curRow;
-// }
-
-// numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, qstart, qend);
-// pos += (qend - qstart + 1) * step;
-// if (numOfRows > 0) {
-// curRow = numOfRows - 1;
-// }
-
-// cur->win.ekey = ascScan ? tsArray[qend] : tsArray[qstart];
-// cur->lastKey = cur->win.ekey + step;
-// lastKeyAppend = cur->win.ekey;
-// }
-// } while (numOfRows < pTsdbReadHandle->outputCapacity);
-
-// if (numOfRows < pTsdbReadHandle->outputCapacity) {
-// /**
-// * if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT
-// * copy them all to result buffer, since it may be overlapped with file data block.
-// */
-// if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan)
-// ||
-// ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) {
-// // no data in cache or data in cache is greater than the ekey of time window, load data from file block
-// if (cur->win.skey == TSKEY_INITIAL_VAL) {
-// cur->win.skey = tsArray[pos];
-// }
-
-// int32_t start = -1, end = -1;
-// getQualifiedRowsPos(pTsdbReadHandle, pos, endPos, numOfRows, &start, &end);
-
-// numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, numOfRows, start, end);
-// pos += (end - start + 1) * step;
-
-// cur->win.ekey = ascScan ? tsArray[end] : tsArray[start];
-// cur->lastKey = cur->win.ekey + step;
-// cur->mixBlock = true;
-// }
-// }
-// }
-
-// cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) ||
-// ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
-
-// if (!ascScan) {
-// TSWAP(cur->win.skey, cur->win.ekey);
-// }
-
-// updateInfoAfterMerge(pTsdbReadHandle, pCheckInfo, numOfRows, pos);
-// doCheckGeneratedBlockRange(pTsdbReadHandle);
-
-// tsdbDebug("%p uid:%" PRIu64 ", data block created, mixblock:%d, brange:%" PRIu64 "-%" PRIu64 " rows:%d, %s",
-// pTsdbReadHandle, pCheckInfo->tableId, cur->mixBlock, cur->win.skey, cur->win.ekey, cur->rows,
-// pTsdbReadHandle->idStr);
-// }
-
 static void cleanupBlockOrderSupporter(SBlockOrderSupporter* pSup) {
 taosMemoryFreeClear(pSup->numOfBlocksPerTable);
 taosMemoryFreeClear(pSup->indexPerTable);
@@ -1252,8 +836,9 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
 // access data blocks according to the offset of each block in asc/desc order.
 int32_t numOfTables = (int32_t)taosHashGetSize(pReader->status.pTableMap);

-SBlockOrderSupporter sup = {0};
+int64_t st = taosGetTimestampUs();

+SBlockOrderSupporter sup = {0};
 int32_t code = initBlockOrderSupporter(&sup, numOfTables);
 if (code != TSDB_CODE_SUCCESS) {
 return code;
@@ -1302,11 +887,12 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
 SFileDataBlockInfo blockInfo = {.uid = sup.pDataBlockInfo[0][i].uid, .tbBlockIdx = i};
 taosArrayPush(pBlockIter->blockList, &blockInfo);
 }
-tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted %s", pReader, cnt,
-pReader->idStr);
+int64_t et = taosGetTimestampUs();
+tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", pReader, cnt,
+(et - st)/1000.0, pReader->idStr);

 pBlockIter->index = asc ? 0 : (numOfBlocks - 1);

 cleanupBlockOrderSupporter(&sup);
 return TSDB_CODE_SUCCESS;
 }
@@ -1340,7 +926,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
 tMergeTreeAdjust(pTree, tMergeTreeGetAdjustIndex(pTree));
 }

-tsdbDebug("%p %d data blocks sort completed, %s", pReader, cnt, pReader->idStr);
+int64_t et = taosGetTimestampUs();
+tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et-st)/1000.0, pReader->idStr);
 cleanupBlockOrderSupporter(&sup);
 taosMemoryFree(pTree);

@@ -1813,6 +1400,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
 SBlockData* pBlockData = &pReader->status.fileBlockData;
 int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;

+int64_t st = taosGetTimestampUs();
+
 while (1) {
 // todo check the validate of row in file block
 {
@@ -1851,10 +1440,11 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
 blockDataUpdateTsWindow(pResBlock, 0);

 setComposedBlockFlag(pReader, true);
+int64_t et = taosGetTimestampUs();

-tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, %s", pReader,
+tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s", pReader,
 pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows,
-pReader->idStr);
+(et - st)/1000.0, pReader->idStr);

 return TSDB_CODE_SUCCESS;
 }
@@ -2031,7 +1621,9 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead

 static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
 SReaderStatus* pStatus = &pReader->status;
-SArray* pIndexList = taosArrayInit(4, sizeof(SBlockIdx));
+
+size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
+SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));

 while (1) {
 bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);
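Here the index list is pre-sized from the number of tables in the scan instead of a fixed capacity of 4. A small, hypothetical sketch of why the initial capacity of an append-only array matters (this is not taosArray, just an illustration):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      int   *data;
      size_t size;
      size_t cap;
      int    growCount;  // how many times we had to realloc
    } IntArray;

    static void arrayInit(IntArray *a, size_t cap) {
      a->data = malloc(cap * sizeof(int));
      a->size = 0;
      a->cap = cap;
      a->growCount = 0;
    }

    static void arrayPush(IntArray *a, int v) {
      if (a->size == a->cap) {           // grow by doubling when full
        a->cap *= 2;
        a->data = realloc(a->data, a->cap * sizeof(int));
        a->growCount++;
      }
      a->data[a->size++] = v;
    }

    int main(void) {
      size_t numOfTables = 10000;

      IntArray small, sized;
      arrayInit(&small, 4);             // old behaviour: fixed small capacity
      arrayInit(&sized, numOfTables);   // new behaviour: capacity from the table count

      for (size_t i = 0; i < numOfTables; ++i) {
        arrayPush(&small, (int)i);
        arrayPush(&sized, (int)i);
      }
      printf("grows with cap=4: %d, grows with cap=numOfTables: %d\n",
             small.growCount, sized.growCount);
      free(small.data);
      free(sized.data);
      return 0;
    }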
@@ -2799,24 +2391,57 @@ int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list) {
 // ====================================== EXPOSED APIs ======================================
 int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTableList, STsdbReader** ppReader,
 const char* idstr) {
-int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, idstr);
-if (code) {
+int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, 4096, idstr);
+if (code != TSDB_CODE_SUCCESS) {
 goto _err;
 }

-if (pCond->suid != 0) {
-(*ppReader)->pSchema = metaGetTbTSchema((*ppReader)->pTsdb->pVnode->pMeta, (*ppReader)->suid, -1);
-} else if (taosArrayGetSize(pTableList) > 0) {
-STableKeyInfo* pKey = taosArrayGet(pTableList, 0);
-(*ppReader)->pSchema = metaGetTbTSchema((*ppReader)->pTsdb->pVnode->pMeta, pKey->uid, -1);
-}
+// check for query time window
 STsdbReader* pReader = *ppReader;
 if (isEmptyQueryTimeWindow(&pReader->window)) {
 tsdbDebug("%p query window not overlaps with the data set, no result returned, %s", pReader, pReader->idStr);
 return TSDB_CODE_SUCCESS;
 }

+if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) {
+// update the SQueryTableDataCond to create inner reader
+STimeWindow w = pCond->twindows;
+int32_t order = pCond->order;
+if (order == TSDB_ORDER_ASC) {
+pCond->twindows.ekey = pCond->twindows.skey;
+pCond->twindows.skey = INT64_MIN;
+pCond->order = TSDB_ORDER_DESC;
+} else {
+pCond->twindows.skey = pCond->twindows.ekey;
+pCond->twindows.ekey = INT64_MAX;
+pCond->order = TSDB_ORDER_ASC;
+}
+
+code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[0], 1, idstr);
+if (code != TSDB_CODE_SUCCESS) {
+goto _err;
+}
+
+if (order == TSDB_ORDER_ASC) {
+pCond->twindows.skey = w.ekey;
+pCond->twindows.ekey = INT64_MAX;
+} else {
+pCond->twindows.skey = INT64_MIN;
+pCond->twindows.ekey = w.ekey;
+}
+code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[1], 1, idstr);
+if (code != TSDB_CODE_SUCCESS) {
+goto _err;
+}
+}
+
+if (pCond->suid != 0) {
+pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, -1);
+} else if (taosArrayGetSize(pTableList) > 0) {
+STableKeyInfo* pKey = taosArrayGet(pTableList, 0);
+pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, -1);
+}
+
 int32_t numOfTables = taosArrayGetSize(pTableList);
 pReader->status.pTableMap = createDataBlockScanInfo(pReader, pTableList->pData, numOfTables);
 if (pReader->status.pTableMap == NULL) {
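For TIMEWINDOW_RANGE_EXTERNAL queries, the hunk above rewrites pCond->twindows twice so that the two inner readers cover the ranges just outside the requested window, with the "previous row" reader scanning in the opposite order so that its first result is the closest earlier row. A standalone sketch of that window arithmetic for the ascending case only, using hypothetical types:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int64_t skey, ekey; } TimeWindow;

    // For an ascending query over [skey, ekey], the two inner readers used for
    // "external" rows scan the ranges just outside that window:
    //   prev: (-inf, skey] scanned in descending order, so its first row is the
    //         closest row before the window;
    //   next: [ekey, +inf) scanned in ascending order.
    static void splitForExternalRows(TimeWindow q, TimeWindow *prev, TimeWindow *next) {
      prev->skey = INT64_MIN;
      prev->ekey = q.skey;
      next->skey = q.ekey;
      next->ekey = INT64_MAX;
    }

    int main(void) {
      TimeWindow q = {.skey = 1000, .ekey = 2000};
      TimeWindow prev, next;
      splitForExternalRows(q, &prev, &next);
      printf("prev window: [%lld, %lld] (scan desc)\n", (long long)prev.skey, (long long)prev.ekey);
      printf("main window: [%lld, %lld]\n", (long long)q.skey, (long long)q.ekey);
      printf("next window: [%lld, %lld] (scan asc)\n", (long long)next.skey, (long long)next.ekey);
      return 0;
    }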
@@ -2827,12 +2452,15 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
 goto _err;
 }

+code = tsdbTakeReadSnap(pReader->pTsdb, &pReader->pReadSnap);
+if (code != TSDB_CODE_SUCCESS) {
+goto _err;
+}
+
+if (pReader->type == TIMEWINDOW_RANGE_CONTAINED) {
 SDataBlockIter* pBlockIter = &pReader->status.blockIter;

-code = tsdbTakeReadSnap(pReader->pTsdb, &pReader->pReadSnap);
-if (code) goto _err;
-
-initFilesetIterator(&pReader->status.fileIter, (*ppReader)->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr);
+initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr);
 resetDataBlockIterator(&pReader->status.blockIter, pReader->order);

 // no data in files, let's try buffer in memory
@@ -2881,20 +2526,6 @@ void tsdbReaderClose(STsdbReader* pReader) {
 tsdbDataFReaderClose(&pReader->pFileReader);
 }

-#if 0
-// if (pReader->status.pTableScanInfo != NULL) {
-// pReader->status.pTableScanInfo = destroyTableCheckInfo(pReader->status.pTableScanInfo);
-// }
-
-// tsdbDestroyReadH(&pReader->rhelper);
-
-// tdFreeDataCols(pReader->pDataCols);
-// pReader->pDataCols = NULL;
-//
-// pReader->prev = doFreeColumnInfoData(pReader->prev);
-// pReader->next = doFreeColumnInfoData(pReader->next);
-#endif
-
 SIOCostSummary* pCost = &pReader->cost;

 tsdbDebug("%p :io-cost summary: head-file read cnt:%" PRIu64 ", head-file time:%" PRIu64 " us, statis-info:%" PRId64
@@ -2907,20 +2538,14 @@ void tsdbReaderClose(STsdbReader* pReader) {
 taosMemoryFreeClear(pReader);
 }

-bool tsdbNextDataBlock(STsdbReader* pReader) {
-if (isEmptyQueryTimeWindow(&pReader->window)) {
-return false;
-}
-
+static bool doTsdbNextDataBlock(STsdbReader* pReader) {
 // cleanup the data that belongs to the previous data block
 SSDataBlock* pBlock = pReader->pResBlock;
 blockDataCleanup(pBlock);

 int64_t stime = taosGetTimestampUs();
-int64_t elapsedTime = stime;
 SReaderStatus* pStatus = &pReader->status;

-if (pReader->type == BLOCK_LOAD_OFFSET_ORDER) {
 if (pStatus->loadFromFile) {
 int32_t code = buildBlockFromFiles(pReader);
 if (code != TSDB_CODE_SUCCESS) {
@@ -2937,25 +2562,76 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
 buildBlockFromBufferSequentially(pReader);
 return pBlock->info.rows > 0;
 }
-} else if (pReader->type == BLOCK_LOAD_TABLESEQ_ORDER) {
-} else if (pReader->type == BLOCK_LOAD_EXTERN_ORDER) {
-} else {
-ASSERT(0);
-}
 return false;
 }

-void tsdbRetrieveDataBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) {
+bool tsdbNextDataBlock(STsdbReader* pReader) {
+if (isEmptyQueryTimeWindow(&pReader->window)) {
+return false;
+}
+
+if (pReader->innerReader[0] != NULL) {
+bool ret = doTsdbNextDataBlock(pReader->innerReader[0]);
+if (ret) {
+pReader->step = EXTERNAL_ROWS_PREV;
+return ret;
+}
+
+tsdbReaderClose(pReader->innerReader[0]);
+pReader->innerReader[0] = NULL;
+}
+
+pReader->step = EXTERNAL_ROWS_MAIN;
+bool ret = doTsdbNextDataBlock(pReader);
+if (ret) {
+return ret;
+}
+
+if (pReader->innerReader[1] != NULL) {
+bool ret1 = doTsdbNextDataBlock(pReader->innerReader[1]);
+if (ret1) {
+pReader->step = EXTERNAL_ROWS_NEXT;
+return ret1;
+}
+
+tsdbReaderClose(pReader->innerReader[1]);
+pReader->innerReader[1] = NULL;
+}
+
+return false;
+}
+
+static void setBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) {
 ASSERT(pDataBlockInfo != NULL && pReader != NULL);
 pDataBlockInfo->rows = pReader->pResBlock->info.rows;
 pDataBlockInfo->uid = pReader->pResBlock->info.uid;
 pDataBlockInfo->window = pReader->pResBlock->info.window;
 }

+void tsdbRetrieveDataBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) {
+if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
+if (pReader->step == EXTERNAL_ROWS_MAIN) {
+setBlockInfo(pReader, pDataBlockInfo);
+} else if (pReader->step == EXTERNAL_ROWS_PREV) {
+setBlockInfo(pReader->innerReader[0], pDataBlockInfo);
+} else {
+setBlockInfo(pReader->innerReader[1], pDataBlockInfo);
+}
+} else {
+setBlockInfo(pReader, pDataBlockInfo);
+}
+}
+
 int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockStatis, bool* allHave) {
 int32_t code = 0;
 *allHave = false;

+if(pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
+*pBlockStatis = NULL;
+return TSDB_CODE_SUCCESS;
+}
+
 // there is no statistics data for composed block
 if (pReader->status.composedDataBlock) {
 *pBlockStatis = NULL;
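The new tsdbNextDataBlock drains the optional "previous rows" inner reader, then the main reader, then the "next rows" inner reader, and records which phase produced the current block in pReader->step so the retrieval calls can route to the right reader. A reduced sketch of that chaining with hypothetical iterator types:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ROWS_PREV = 1, ROWS_MAIN = 2, ROWS_NEXT = 3 } Step;

    typedef struct {
      int remaining;  // number of blocks this source can still produce
    } Source;

    static bool sourceNext(Source *s) {
      if (s == NULL || s->remaining == 0) return false;
      s->remaining--;
      return true;
    }

    typedef struct {
      Source *prev;    // may be NULL when no external rows were requested
      Source  mainSrc;
      Source *next;
      Step    step;    // which source produced the current block
    } Reader;

    // Try prev, then main, then next; remember the phase for later retrieval calls.
    static bool readerNext(Reader *r) {
      if (r->prev != NULL) {
        if (sourceNext(r->prev)) { r->step = ROWS_PREV; return true; }
        r->prev = NULL;            // exhausted: drop it, like closing the inner reader
      }
      r->step = ROWS_MAIN;
      if (sourceNext(&r->mainSrc)) return true;
      if (r->next != NULL) {
        if (sourceNext(r->next)) { r->step = ROWS_NEXT; return true; }
        r->next = NULL;
      }
      return false;
    }

    int main(void) {
      Source prev = {1}, next = {1};
      Reader r = {.prev = &prev, .mainSrc = {2}, .next = &next, .step = ROWS_MAIN};
      while (readerNext(&r)) printf("block from phase %d\n", r.step);
      return 0;
    }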
@@ -3025,7 +2701,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
 return code;
 }

-SArray* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
+static SArray* doRetrieveDataBlock(STsdbReader* pReader) {
 SReaderStatus* pStatus = &pReader->status;

 if (pStatus->composedDataBlock) {
@@ -3054,16 +2730,27 @@ SArray* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
 return pReader->pResBlock->pDataBlock;
 }

+SArray* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
+if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
+if (pReader->step == EXTERNAL_ROWS_PREV) {
+return doRetrieveDataBlock(pReader->innerReader[0]);
+} else if (pReader->step == EXTERNAL_ROWS_NEXT) {
+return doRetrieveDataBlock(pReader->innerReader[1]);
+}
+}
+
+return doRetrieveDataBlock(pReader);
+}
+
 int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
 if (isEmptyQueryTimeWindow(&pReader->window)) {
 return TSDB_CODE_SUCCESS;
 }

 pReader->order = pCond->order;
-pReader->type = BLOCK_LOAD_OFFSET_ORDER;
+pReader->type = TIMEWINDOW_RANGE_CONTAINED;
 pReader->status.loadFromFile = true;
 pReader->status.pTableIter = NULL;

 pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);

 // allocate buffer in order to load data blocks from file
@@ -3073,10 +2760,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
 pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
 tsdbDataFReaderClose(&pReader->pFileReader);

-// todo set the correct numOfTables
-int32_t numOfTables = 1;
-SDataBlockIter* pBlockIter = &pReader->status.blockIter;
-
+int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
 tsdbDataFReaderClose(&pReader->pFileReader);

 initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr);
@@ -3084,18 +2768,23 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
 resetDataBlockScanInfo(pReader->status.pTableMap);

 int32_t code = 0;
+SDataBlockIter* pBlockIter = &pReader->status.blockIter;
+
 // no data in files, let's try buffer in memory
 if (pReader->status.fileIter.numOfFiles == 0) {
 pReader->status.loadFromFile = false;
 } else {
 code = initForFirstBlockInFile(pReader, pBlockIter);
 if (code != TSDB_CODE_SUCCESS) {
+tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s",
+pReader, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr);
 return code;
 }
 }

 tsdbDebug("%p reset reader, suid:%" PRIu64 ", numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s",
 pReader, pReader->suid, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr);

 return code;
 }

@@ -3186,7 +2875,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {

 STbData* d = NULL;
 if (pReader->pTsdb->mem != NULL) {
-tsdbGetTbDataFromMemTable(pReader->pTsdb->mem, pReader->suid, pBlockScanInfo->uid, &d);
+tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid, &d);
 if (d != NULL) {
 rows += tsdbGetNRowsInTbData(d);
 }
@@ -3194,7 +2883,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {

 STbData* di = NULL;
 if (pReader->pTsdb->imem != NULL) {
-tsdbGetTbDataFromMemTable(pReader->pTsdb->imem, pReader->suid, pBlockScanInfo->uid, &di);
+tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid, &di);
 if (di != NULL) {
 rows += tsdbGetNRowsInTbData(di);
 }
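Both memtable lookups now go through pReader->pReadSnap instead of pTsdb->mem / pTsdb->imem, so the reader keeps using the memtables pinned when the snapshot was taken even if the vnode switches or flushes them mid-query. A reduced sketch of the pinned-snapshot idea (hypothetical types; the real snapshot handling is more involved):

    #include <stdio.h>

    typedef struct { int rows; } MemTable;

    typedef struct {
      MemTable *mem;   // current writable memtable; may be replaced at any time
      MemTable *imem;  // memtable being flushed
    } Tsdb;

    typedef struct {
      MemTable *pMem;  // the memtables the reader pinned when it started
      MemTable *pIMem;
    } ReadSnap;

    // Capture whatever is current at open time; the reader only looks at the snapshot afterwards.
    static ReadSnap takeReadSnap(const Tsdb *t) {
      ReadSnap s = {.pMem = t->mem, .pIMem = t->imem};
      return s;
    }

    int main(void) {
      MemTable m0 = {100}, m1 = {0};
      Tsdb tsdb = {.mem = &m0, .imem = NULL};

      ReadSnap snap = takeReadSnap(&tsdb);

      tsdb.mem = &m1;  // vnode switches to a fresh memtable while the query runs

      // Reading through the snapshot still sees the rows that existed at open time.
      printf("rows via snapshot: %d, rows via live mem: %d\n",
             snap.pMem->rows, tsdb.mem->rows);
      return 0;
    }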
@@ -82,8 +82,6 @@ size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
 void initResultRowInfo(SResultRowInfo* pResultRowInfo);
 void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);

-void closeAllResultRows(SResultRowInfo* pResultRowInfo);
-
 void initResultRow(SResultRow* pResultRow);
 void closeResultRow(SResultRow* pResultRow);
 bool isResultRowClosed(SResultRow* pResultRow);
@@ -108,7 +108,6 @@ typedef struct STaskCostInfo {
 SFileBlockLoadRecorder* pRecoder;
 uint64_t elapsedTime;

-uint64_t firstStageMergeTime;
 uint64_t winInfoSize;
 uint64_t tableInfoSize;
 uint64_t hashSize;
@@ -351,9 +350,7 @@ typedef struct STableMergeScanInfo {
 SArray* pColMatchInfo;
 int32_t numOfOutput;

-SExprInfo* pPseudoExpr;
-int32_t numOfPseudoExpr;
-SqlFunctionCtx* pPseudoCtx;
+SExprSupp pseudoSup;

 SQueryTableDataCond cond;
 int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
@ -592,6 +589,7 @@ typedef struct SProjectOperatorInfo {
|
||||||
SLimitInfo limitInfo;
|
SLimitInfo limitInfo;
|
||||||
bool mergeDataBlocks;
|
bool mergeDataBlocks;
|
||||||
SSDataBlock* pFinalRes;
|
SSDataBlock* pFinalRes;
|
||||||
|
SNode* pCondition;
|
||||||
} SProjectOperatorInfo;
|
} SProjectOperatorInfo;
|
||||||
|
|
||||||
typedef struct SIndefOperatorInfo {
|
typedef struct SIndefOperatorInfo {
|
||||||
|
|
|
@@ -43,10 +43,6 @@ void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo) {
    }
  }

-void closeAllResultRows(SResultRowInfo* pResultRowInfo) {
-  // do nothing
-}
-
bool isResultRowClosed(SResultRow* pRow) { return (pRow->closed == true); }

void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; }

@@ -160,11 +156,13 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {

SArray* createSortInfo(SNodeList* pNodeList) {
  size_t numOfCols = 0;

  if (pNodeList != NULL) {
    numOfCols = LIST_LENGTH(pNodeList);
  } else {
    numOfCols = 0;
  }

  SArray* pList = taosArrayInit(numOfCols, sizeof(SBlockOrderInfo));
  if (pList == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -196,10 +194,6 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {

  for (int32_t i = 0; i < numOfCols; ++i) {
    SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
-   /*if (!pDescNode->output) {  // todo disable it temporarily*/
-   /*continue;*/
-   /*}*/

    SColumnInfoData idata =
        createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId);
    idata.info.scale = pDescNode->dataType.scale;
@@ -701,9 +695,6 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
    }
  }

-#ifdef BUF_PAGE_DEBUG
-  qDebug("page_setSelect num:%d", num);
-#endif
  if (p != NULL) {
    p->subsidiaries.pCtx = pValCtx;
    p->subsidiaries.num = num;

@@ -852,7 +843,7 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
  // TODO: get it from stable scan node
  pCond->twindows = pTableScanNode->scanRange;
  pCond->suid = pTableScanNode->scan.suid;
- pCond->type = BLOCK_LOAD_OFFSET_ORDER;
+ pCond->type = TIMEWINDOW_RANGE_CONTAINED;
  pCond->startVersion = -1;
  pCond->endVersion = -1;
  // pCond->type = pTableScanNode->scanFlag;
@@ -947,6 +938,7 @@ STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInter
}

// get the correct time window according to the handled timestamp
+// todo refactor
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
                                int32_t order) {
  STimeWindow w = {0};
@@ -1665,9 +1665,6 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
  //  hashSize += taosHashGetMemSize(pRuntimeEnv->tableqinfoGroupInfo.map);
  //  pSummary->hashSize = hashSize;

- // add the merge time
- pSummary->elapsedTime += pSummary->firstStageMergeTime;

  //  SResultRowPool* p = pTaskInfo->pool;
  //  if (p != NULL) {
  //    pSummary->winInfoSize = getResultRowPoolMemSize(p);

@@ -1676,17 +1673,16 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
  //    pSummary->winInfoSize = 0;
  //    pSummary->numOfTimeWindows = 0;
  //  }
- //
- //  calculateOperatorProfResults(pQInfo);

  SFileBlockLoadRecorder* pRecorder = pSummary->pRecoder;
  if (pSummary->pRecoder != NULL) {
-   qDebug("%s :cost summary: elapsed time:%" PRId64 " us, first merge:%" PRId64
-          " us, total blocks:%d, "
-          "load block statis:%d, load data block:%d, total rows:%" PRId64 ", check rows:%" PRId64,
-          GET_TASKID(pTaskInfo), pSummary->elapsedTime, pSummary->firstStageMergeTime, pRecorder->totalBlocks,
-          pRecorder->loadBlockStatis, pRecorder->loadBlocks, pRecorder->totalRows, pRecorder->totalCheckedRows);
+   qDebug(
+       "%s :cost summary: elapsed time:%.2f ms, total blocks:%d, load block SMA:%d, load data block:%d, total rows:%"
+       PRId64 ", check rows:%" PRId64, GET_TASKID(pTaskInfo), pSummary->elapsedTime / 1000.0,
+       pRecorder->totalBlocks, pRecorder->loadBlockStatis, pRecorder->loadBlocks, pRecorder->totalRows,
+       pRecorder->totalCheckedRows);
  }

  // qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb,
  //        hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0,
  //        pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0);
@@ -3036,7 +3032,6 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
    }
  }

- closeAllResultRows(&pAggInfo->binfo.resultRowInfo);
  initGroupedResultInfo(&pAggInfo->groupResInfo, pAggInfo->aggSup.pResultRowHashTable, 0);
  OPTR_SET_OPENED(pOperator);
@@ -3334,6 +3329,7 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
    if (pLimitInfo->currentGroupId == 0 ||
        pLimitInfo->currentGroupId == pBlock->info.groupId) {  // it is the first group
      pLimitInfo->currentGroupId = pBlock->info.groupId;
+     ASSERT(pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM);
      continue;
    } else if (pLimitInfo->currentGroupId != pBlock->info.groupId) {
      // now it is the data from a new group

@@ -3342,6 +3338,7 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
      // ignore data block in current group
      if (pLimitInfo->remainGroupOffset > 0) {
+       ASSERT(pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM);
        continue;
      }
    }

@@ -3386,10 +3383,12 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
    if (pLimitInfo->remainOffset >= pInfo->pRes->info.rows) {
      pLimitInfo->remainOffset -= pInfo->pRes->info.rows;
      blockDataCleanup(pInfo->pRes);
+     ASSERT(pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM);
      continue;
    } else if (pLimitInfo->remainOffset < pInfo->pRes->info.rows && pLimitInfo->remainOffset > 0) {
      blockDataTrimFirstNRows(pInfo->pRes, pLimitInfo->remainOffset);
      pLimitInfo->remainOffset = 0;
+     ASSERT(pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM);
    }

    // check for the limitation in each group

@@ -3397,6 +3396,7 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
        pLimitInfo->numOfOutputRows + pInfo->pRes->info.rows >= pLimitInfo->limit.limit) {
      int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
      blockDataKeepFirstNRows(pInfo->pRes, keepRows);
+     ASSERT(pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM);
      if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
        pOperator->status = OP_EXEC_DONE;
      }
@@ -3406,27 +3406,32 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
      break;
    }

-   // no results generated
-   if (pInfo->pRes->info.rows == 0 || (!pProjectInfo->mergeDataBlocks)) {
-     break;
-   }
-
-   if (pProjectInfo->mergeDataBlocks) {
-     pFinalRes->info.groupId = pInfo->pRes->info.groupId;
-     pFinalRes->info.version = pInfo->pRes->info.version;
-
-     // continue merge data, ignore the group id
-     blockDataMerge(pFinalRes, pInfo->pRes);
-     if (pFinalRes->info.rows + pInfo->pRes->info.rows <= pOperator->resultInfo.threshold) {
-       continue;
-     }
-   }
-
-   // do apply filter
-   SSDataBlock* p = pProjectInfo->mergeDataBlocks ? pFinalRes : pRes;
-   doFilter(pProjectInfo->pFilterNode, p, NULL);
-   if (p->info.rows > 0) {
+   if (pProjectInfo->mergeDataBlocks && pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM) {
+     if (pRes->info.rows > 0) {
+       pFinalRes->info.groupId = pRes->info.groupId;
+       pFinalRes->info.version = pRes->info.version;
+
+       // continue merge data, ignore the group id
+       blockDataMerge(pFinalRes, pRes);
+       if (pFinalRes->info.rows + pRes->info.rows <= pOperator->resultInfo.threshold) {
+         continue;
+       }
+     }
+
+     // do apply filter
+     doFilter(pProjectInfo->pFilterNode, pFinalRes, NULL);
+     if (pFinalRes->info.rows > 0 || pRes->info.rows == 0) {
+       break;
+     }
+   } else {
+     // do apply filter
+     if (pRes->info.rows > 0) {
+       doFilter(pProjectInfo->pFilterNode, pRes, NULL);
+       if (pRes->info.rows == 0) {
+         continue;
+       }
+     }
+
      break;
    }
  }
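Note on the rewritten block above: the project operator now only merges partial result blocks into pFinalRes when it is not running in stream mode, and it keeps pulling input until the merged block passes the row threshold before filtering and returning it. A rough sketch of that accumulation loop in Python, using hypothetical stand-in objects rather than the executor's real structures:

    def project_next(blocks, threshold, apply_filter, merge_blocks=True, stream_mode=False):
        # blocks yields one projected result block (a list of rows) per iteration
        merged = []
        for rows in blocks:
            if merge_blocks and not stream_mode:
                merged.extend(rows)              # keep accumulating small blocks
                if len(merged) <= threshold:
                    continue                     # not enough rows yet, fetch the next block
                merged = apply_filter(merged)
                if merged:
                    return merged                # emit one large merged block
            else:
                rows = apply_filter(rows)        # per-block path, used in stream mode
                if rows:
                    return rows
        return apply_filter(merged) if merged else None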
@@ -3891,7 +3896,6 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys

  pInfo->binfo.pRes = pResBlock;
  pInfo->pFinalRes = createOneDataBlock(pResBlock, false);

  pInfo->pFilterNode = pProjPhyNode->node.pConditions;
  pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;

@@ -4422,7 +4426,7 @@ static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pC

  pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
  pCond->suid = uid;
- pCond->type = BLOCK_LOAD_OFFSET_ORDER;
+ pCond->type = TIMEWINDOW_RANGE_CONTAINED;
  pCond->startVersion = -1;
  pCond->endVersion = -1;
@@ -2934,6 +2934,7 @@ void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
  pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);

  taosArrayDestroy(pTableScanInfo->pSortInfo);
+ cleanupExprSupp(&pTableScanInfo->pseudoSup);

  taosMemoryFreeClear(pTableScanInfo->rowEntryInfoOffset);
  taosMemoryFreeClear(param);

@@ -2992,8 +2993,9 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
  }

  if (pTableScanNode->scan.pScanPseudoCols != NULL) {
-   pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr);
-   pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowEntryInfoOffset);
+   SExprSupp* pSup = &pInfo->pseudoSup;
+   pSup->pExprInfo = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->numOfExprs);
+   pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
  }

  pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
@@ -1094,7 +1094,6 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
    hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
  }

- closeAllResultRows(&pInfo->binfo.resultRowInfo);
  initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->order);
  OPTR_SET_OPENED(pOperator);

@@ -1250,7 +1249,6 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
  pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;

  pOperator->status = OP_RES_TO_RETURN;
- closeAllResultRows(&pBInfo->resultRowInfo);

  initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
  blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);

@@ -2045,7 +2043,6 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {

  // restore the value
  pOperator->status = OP_RES_TO_RETURN;
- closeAllResultRows(&pBInfo->resultRowInfo);

  initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
  blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);

@@ -2209,8 +2206,6 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
  SSDataBlock* pResBlock = pSliceInfo->pRes;
  SExprSupp* pSup = &pOperator->exprSupp;

- blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity);

  //  if (pOperator->status == OP_RES_TO_RETURN) {
  //    // doBuildResultDatablock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
  //    if (pResBlock->info.rows == 0 || !hasDataInGroupInfo(&pSliceInfo->groupResInfo)) {
@@ -90,7 +90,7 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken*
SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral);
SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt);
SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLiteral);
-SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias);
+SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias);
SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType type, SNode* pParam1, SNode* pParam2);
SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight);
SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight);

@@ -637,8 +637,9 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
  return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt), pStart, pEnd);
}

-SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias) {
+SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias) {
  CHECK_PARSER_STATUS(pCxt);
+ trimEscape(pAlias);
  int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n);
  strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len);
  ((SExprNode*)pNode)->aliasName[len] = '\0';
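The parser change drops the const qualifier so setProjectionAlias can call trimEscape() on the alias token before clamping it into the fixed-size aliasName buffer. A minimal Python sketch of the same trim-and-clamp idea; the buffer size and quote handling below are illustrative assumptions, not TDengine APIs:

    ALIAS_BUF_SIZE = 65  # hypothetical buffer size, for illustration only

    def set_projection_alias(alias_token: str) -> str:
        # roughly what trimEscape() does: drop one pair of surrounding quote characters
        if len(alias_token) >= 2 and alias_token[0] == alias_token[-1] and alias_token[0] in "'\"`":
            alias_token = alias_token[1:-1]
        # keep at most ALIAS_BUF_SIZE - 1 characters, leaving room for the terminator
        return alias_token[:ALIAS_BUF_SIZE - 1]

    assert set_projection_alias("`avg_speed`") == "avg_speed"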
@@ -66,7 +66,7 @@ int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
  return TSDB_CODE_SUCCESS;
}

-int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {
+int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
  int32_t code = 0;
  bool qcontinue = true;
  SSDataBlock *pRes = NULL;

@@ -104,8 +104,8 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {

      QW_ERR_RET(qwHandleTaskComplete(QW_FPARAMS(), ctx));

-     if (queryEnd) {
-       *queryEnd = true;
+     if (queryStop) {
+       *queryStop = true;
      }

      break;

@@ -125,6 +125,10 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {
    QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", rows, qcontinue);

    if (!qcontinue) {
+     if (queryStop) {
+       *queryStop = true;
+     }
+
      break;
    }

@@ -566,7 +570,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
  SQWPhaseInput input = {0};
  void *rsp = NULL;
  int32_t dataLen = 0;
- bool queryEnd = false;
+ bool queryStop = false;

  do {
    QW_ERR_JRET(qwHandlePrePhaseEvents(QW_FPARAMS(), QW_PHASE_PRE_CQUERY, &input, NULL));

@@ -576,7 +580,7 @@
    atomic_store_8((int8_t *)&ctx->queryInQueue, 0);
    atomic_store_8((int8_t *)&ctx->queryContinue, 0);

-   QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, &queryEnd));
+   QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, &queryStop));

    if (QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) {
      SOutputData sOutput = {0};

@@ -627,7 +631,7 @@
    }

    QW_LOCK(QW_WRITE, &ctx->lock);
-   if (queryEnd || code || 0 == atomic_load_8((int8_t *)&ctx->queryContinue)) {
+   if (queryStop || code || 0 == atomic_load_8((int8_t *)&ctx->queryContinue)) {
      // Note: query is not running anymore
      QW_SET_PHASE(ctx, 0);
      QW_UNLOCK(QW_WRITE, &ctx->lock);
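The rename from queryEnd to queryStop also changes behaviour slightly: the flag is now raised both when the task finishes and when the sink refuses more data, so qwProcessCQuery stops re-driving the task in either case. A hedged sketch of that control flow in Python; the task and sink objects are stand-ins, not the real qworker structures:

    def exec_task(task, sink):
        # returns True when the caller should stop rescheduling this task
        query_stop = False
        while True:
            block = task.next_block()
            if block is None:          # task fully finished
                query_stop = True
                break
            if not sink.put(block):    # sink is full or paused: mirror of `if (!qcontinue)`
                query_stop = True
                break
        return query_stop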
@@ -1327,6 +1327,8 @@ class Task():

            # TDengine 3.0 Error Codes:
            0x0333, # Object is creating # TODO: this really is NOT an acceptable error
+           0x0369, # Tag already exists
+           0x0388, # Database not exist
            0x03A0, # STable already exists
            0x03A1, # STable [does] not exist
            0x03AA, # Tag already exists
@@ -0,0 +1,250 @@ (new file, all lines added)

import taos
import sys
import time
import socket
import os
import threading
import math

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    def __init__(self):
        self.vgroups = 4
        self.ctbNum = 100
        self.rowsPerTbl = 1000

    def init(self, conn, logSql):
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), False)

    def prepareTestEnv(self):
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 10,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 1}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
        tdLog.info("create ctb")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])

        tdLog.info("insert data")
        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])

        tdLog.info("flush db to let data falls into the disk")
        tdSql.query("flush database %s"%(paraDict['dbName']))
        return

    def tmqCase1(self):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 500,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 5,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 1}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        topicNameList = ['topic1']
        expectRowsList = []
        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb with filter")
        queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
        # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        # tdSql.query(queryString)
        # expectRowsList.append(tdSql.getRows())

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
        topicList = topicNameList[0]
        ifcheckdata = 1
        ifManualCommit = 1
        keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        # after start consume, continue insert some data
        paraDict['batchNum'] = 100
        paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
        pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])

        pInsertThread.join()

        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())

        tdLog.info("wait the consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

        tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
        if expectRowsList[0] != resultList[0]:
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        # tmqCom.checkFileContent(consumerId, queryString)

        tdSql.query("flush database %s"%(paraDict['dbName']))

        for i in range(len(topicNameList)):
            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def tmqCase2(self):
        tdLog.printNoPrefix("======== test case 2: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 500,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 3,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 1}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        topicNameList = ['topic1']
        expectRowsList = []
        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb with filter")
        queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
        # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())
        totalRowsInserted = expectRowsList[0]

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId = 1
        expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
        topicList = topicNameList[0]
        ifcheckdata = 1
        ifManualCommit = 1
        keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor 0")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
        tdLog.info("wait the consume result")

        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        actConsumeRows = resultList[0]

        tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
        if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        # reinit consume info, and start tmq_sim, then check consume result
        tmqCom.initConsumerTable()
        consumerId = 2
        expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor 1")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
        tdLog.info("wait the consume result")

        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

        actConsumeRows = resultList[0]
        tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
        if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        for i in range(len(topicNameList)):
            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 2 end ...... ")

    def run(self):
        tdSql.prepare()
        self.prepareTestEnv()
        self.tmqCase1()
        self.tmqCase2()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
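Because this new case keeps inserting while the consumer runs and commits once per second, it asserts a range rather than an exact count: the consumer must deliver at least the configured fraction of the rows but never more than what was actually inserted. The same bounded check as a small, dependency-free Python snippet (numbers taken from the paraDict defaults above):

    def check_consumed_rows(consumed, lower, upper, consumer_id):
        # mirrors the range assertions used in tmqCase2 above
        print("act consume rows: %d, expect consume rows between %d and %d" % (consumed, lower, upper))
        if not (lower <= consumed <= upper):
            raise AssertionError("%d tmq consume rows error!" % consumer_id)

    # consumer 1 must see at least ceil(1000 * 100 / 3) rows, but no more than the total inserted
    check_consumed_rows(consumed=40000, lower=33334, upper=100000, consumer_id=1)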
@@ -17,8 +17,8 @@ from tmqCommon import *

class TDTestCase:
    def __init__(self):
-       self.vgroups = 1
-       self.ctbNum = 100
+       self.vgroups = 4
+       self.ctbNum = 1
        self.rowsPerTbl = 10000

    def init(self, conn, logSql):

@@ -38,9 +38,9 @@ class TDTestCase:
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
-                   'ctbNum': 100,
+                   'ctbNum': 1,
                    'rowsPerTbl': 10000,
-                   'batchNum': 3000,
+                   'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 10,
                    'showMsg': 1,

@@ -85,7 +85,7 @@ class TDTestCase:
                    'rowsPerTbl': 10000,
                    'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
-                   'pollDelay': 3,
+                   'pollDelay': 5,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 1}

@@ -117,17 +117,16 @@ class TDTestCase:
        keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

-       tdLog.info("start consume processor")
-       tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])

        # after start consume, continue insert some data
        paraDict['batchNum'] = 100
        paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
-       tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
-                                              ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
-                                              startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+       pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+       tdLog.info("start consume processor")
+       tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+       pInsertThread.join()

-       #
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())

@@ -135,15 +134,16 @@ class TDTestCase:
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

        tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
        if expectRowsList[0] != resultList[0]:
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        tmqCom.checkFileContent(consumerId, queryString)

-       time.sleep(10)
+       tdSql.query("flush database %s"%(paraDict['dbName']))

        for i in range(len(topicNameList)):
+           tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 1 end ...... ")

@@ -204,13 +204,12 @@ class TDTestCase:

        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
+       actConsumeRows = resultList[0]

-       if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
-           tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
+       tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
+       if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
            tdLog.exit("%d tmq consume rows error!"%consumerId)

-       firstConsumeRows = resultList[0]

        # reinit consume info, and start tmq_sim, then check consume result
        tmqCom.initConsumerTable()
        consumerId = 2

@@ -224,15 +223,13 @@ class TDTestCase:
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

-       actConsumeTotalRows = firstConsumeRows + resultList[0]
-       if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
-           tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
-           tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
+       actConsumeRows = resultList[0]
+       tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
+       if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
            tdLog.exit("%d tmq consume rows error!"%consumerId)

-       time.sleep(10)
        for i in range(len(topicNameList)):
+           tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 2 end ...... ")

@@ -241,7 +238,7 @@ class TDTestCase:
        tdSql.prepare()
        self.prepareTestEnv()
        self.tmqCase1()
-       # self.tmqCase2()
+       self.tmqCase2()

    def stop(self):
        tdSql.close()
@@ -151,41 +151,6 @@ class TDTestCase:
        if not (totalConsumeRows == totalRowsFromQury):
            tdLog.exit("tmq consume rows error!")

-       # tdLog.info("****************************************************************************")
-       # tmqCom.initConsumerTable()
-       # consumerId = 1
-       # expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
-       # topicList = topicFromStb1
-       # ifcheckdata = 0
-       # ifManualCommit = 0
-       # keyList = 'group.id:cgrp2,\
-       #            enable.auto.commit:true,\
-       #            auto.commit.interval.ms:3000,\
-       #            auto.offset.reset:earliest'
-       # tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
-       # tdLog.info("start consume processor")
-       # tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
-       # expectRows = 1
-       # resultList = tmqCom.selectConsumeResult(expectRows)
-       # totalConsumeRows = 0
-       # for i in range(expectRows):
-       #     totalConsumeRows += resultList[i]
-
-       # tdSql.query(queryString)
-       # totalRowsFromQury = tdSql.getRows()
-
-       # tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQury))
-       # if not (totalConsumeRows == totalRowsFromQury):
-       #     tdLog.exit("tmq consume rows error!")
-
-       # tdLog.info("****************************************************************************")

        tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
        tdSql.query("drop topic %s"%topicFromStb1)

@@ -259,7 +224,7 @@ class TDTestCase:
        tdLog.info("create some new child table and insert data ")
        paraDict["batchNum"] = 100
        paraDict["ctbPrefix"] = 'newCtb'
-       # tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
+       tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1
@ -0,0 +1,225 @@
|
||||||
|
|
||||||
|
import taos
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import *
|
||||||
|
sys.path.append("./7-tmq")
|
||||||
|
from tmqCommon import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
def __init__(self):
|
||||||
|
self.snapshot = 0
|
||||||
|
self.vgroups = 4
|
||||||
|
self.ctbNum = 1000
|
||||||
|
self.rowsPerTbl = 10
|
||||||
|
|
||||||
|
def init(self, conn, logSql):
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
tdSql.init(conn.cursor(), False)
|
||||||
|
|
||||||
|
# drop some ntbs
|
||||||
|
def tmqCase1(self):
|
||||||
|
tdLog.printNoPrefix("======== test case 1: ")
|
||||||
|
paraDict = {'dbName': 'dbt',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'TIMESTAMP', 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
|
||||||
|
'ctbPrefix': 'ntb',
|
||||||
|
'ctbStartIdx': 0,
|
||||||
|
'ctbNum': 1000,
|
||||||
|
'rowsPerTbl': 100,
|
||||||
|
'batchNum': 100,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
'endTs': 0,
|
||||||
|
'pollDelay': 5,
|
||||||
|
'showMsg': 1,
|
||||||
|
'showRow': 1,
|
||||||
|
'snapshot': 0}
|
||||||
|
paraDict['snapshot'] = self.snapshot
|
||||||
|
paraDict['vgroups'] = self.vgroups
|
||||||
|
paraDict['ctbNum'] = self.ctbNum
|
||||||
|
paraDict['rowsPerTbl'] = self.rowsPerTbl
|
||||||
|
|
||||||
|
tmqCom.initConsumerTable()
|
||||||
|
tdLog.info("start create database....")
|
||||||
|
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
|
||||||
|
tdLog.info("start create normal tables....")
|
||||||
|
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
|
||||||
|
tdLog.info("start insert data into normal tables....")
|
||||||
|
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
|
||||||
|
|
||||||
|
tdLog.info("create topics from database")
|
||||||
|
topicFromDb = 'topic_dbt'
|
||||||
|
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
|
||||||
|
|
||||||
|
if self.snapshot == 0:
|
||||||
|
consumerId = 0
|
||||||
|
elif self.snapshot == 1:
|
||||||
|
consumerId = 1
|
||||||
|
|
||||||
|
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
|
||||||
|
topicList = topicFromDb
|
||||||
|
ifcheckdata = 1
|
||||||
|
ifManualCommit = 1
|
||||||
|
keyList = 'group.id:cgrp1,\
|
||||||
|
enable.auto.commit:true,\
|
||||||
|
auto.commit.interval.ms:1000,\
|
||||||
|
auto.offset.reset:earliest'
|
||||||
|
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
||||||
|
|
||||||
|
tdLog.info("start consume processor")
|
||||||
|
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
|
||||||
|
|
||||||
|
tmqCom.getStartConsumeNotifyFromTmqsim()
|
||||||
|
tdLog.info("drop some ntables")
|
||||||
|
# drop 1/4 ctbls from half offset
|
||||||
|
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2)
|
||||||
|
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
|
||||||
|
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
|
||||||
|
|
||||||
|
tdLog.info("start to check consume result")
|
||||||
|
expectRows = 1
|
||||||
|
resultList = tmqCom.selectConsumeResult(expectRows)
|
||||||
|
totalConsumeRows = 0
|
||||||
|
for i in range(expectRows):
|
||||||
|
totalConsumeRows += resultList[i]
|
||||||
|
|
||||||
|
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
|
||||||
|
|
||||||
|
if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
|
||||||
|
tdLog.exit("tmq consume rows error with snapshot = 0!")
|
||||||
|
|
||||||
|
tdLog.info("wait subscriptions exit ....")
|
||||||
|
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
|
||||||
|
|
||||||
|
tdSql.query("drop topic %s"%topicFromDb)
|
||||||
|
tdLog.info("success dorp topic: %s"%topicFromDb)
|
||||||
|
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# drop some ntbs and create some new ntbs
|
||||||
|
def tmqCase2(self):
|
||||||
|
tdLog.printNoPrefix("======== test case 2: ")
|
||||||
|
paraDict = {'dbName': 'dbt',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'TIMESTAMP', 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
|
||||||
|
'ctbPrefix': 'ntb',
|
||||||
|
'ctbStartIdx': 0,
|
||||||
|
'ctbNum': 1000,
|
||||||
|
'rowsPerTbl': 100,
|
||||||
|
'batchNum': 100,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
'endTs': 0,
|
||||||
|
'pollDelay': 10,
|
||||||
|
'showMsg': 1,
|
||||||
|
'showRow': 1,
|
||||||
|
'snapshot': 0}
|
||||||
|
paraDict['snapshot'] = self.snapshot
|
||||||
|
paraDict['vgroups'] = self.vgroups
|
||||||
|
paraDict['ctbNum'] = self.ctbNum
|
||||||
|
paraDict['rowsPerTbl'] = self.rowsPerTbl
|
||||||
|
|
||||||
|
tmqCom.initConsumerTable()
|
||||||
|
tdLog.info("start create database....")
|
||||||
|
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
|
||||||
|
tdLog.info("start create normal tables....")
|
||||||
|
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
|
||||||
|
tdLog.info("start insert data into normal tables....")
|
||||||
|
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
|
||||||
|
|
||||||
|
tdLog.info("create topics from database")
|
||||||
|
topicFromDb = 'topic_dbt'
|
||||||
|
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
|
||||||
|
|
||||||
|
if self.snapshot == 0:
|
||||||
|
consumerId = 2
|
||||||
|
elif self.snapshot == 1:
|
||||||
|
consumerId = 3
|
||||||
|
|
||||||
|
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2)
|
||||||
|
topicList = topicFromDb
|
||||||
|
ifcheckdata = 1
|
||||||
|
ifManualCommit = 1
|
||||||
|
keyList = 'group.id:cgrp1,\
|
||||||
|
enable.auto.commit:true,\
|
||||||
|
auto.commit.interval.ms:1000,\
|
||||||
|
auto.offset.reset:earliest'
|
||||||
|
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
||||||
|
|
||||||
|
tdLog.info("start consume processor")
|
||||||
|
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
|
||||||
|
|
||||||
|
tmqCom.getStartConsumeNotifyFromTmqsim()
|
||||||
|
tdLog.info("drop some ntables")
|
||||||
|
# drop 1/4 ctbls from half offset
|
||||||
|
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2)
|
||||||
|
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
|
||||||
|
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
|
||||||
|
|
||||||
|
tdLog.info("start create some new normal tables....")
|
||||||
|
paraDict["ctbPrefix"] = 'newCtb'
|
||||||
|
paraDict["ctbNum"] = self.ctbNum
|
||||||
|
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
|
||||||
|
tdLog.info("start insert data into these new normal tables....")
|
||||||
|
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
|
||||||
|
|
||||||
|
tdLog.info("start to check consume result")
|
||||||
|
expectRows = 1
|
||||||
|
resultList = tmqCom.selectConsumeResult(expectRows)
|
||||||
|
totalConsumeRows = 0
|
||||||
|
for i in range(expectRows):
|
||||||
|
totalConsumeRows += resultList[i]
|
||||||
|
|
||||||
|
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
|
||||||
|
|
||||||
|
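        # The check below accepts expectrowcnt/2 * (1 + 3/4) <= totalConsumeRows < expectrowcnt,
        # i.e. at least 7/8 of the expected total but less than all of it, presumably because
        # a quarter of the original ntables are dropped while consumption is in progress.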
        if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
            tdLog.exit("tmq consume rows error with snapshot = 0!")

        tdLog.info("wait subscriptions exit ....")
        tmqCom.waitSubscriptionExit(tdSql, topicFromDb)

        tdSql.query("drop topic %s"%topicFromDb)
        tdLog.info("success drop topic: %s"%topicFromDb)
        tdLog.printNoPrefix("======== test case 2 end ...... ")

    def run(self):
        tdLog.printNoPrefix("=============================================")
        tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
        self.snapshot = 0
        self.tmqCase1()
        self.tmqCase2()

        # tdLog.printNoPrefix("====================================================================")
        # tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
        # self.snapshot = 1
        # self.tmqCase1()
        # self.tmqCase2()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -18,7 +18,7 @@ class TDTestCase:
     def __init__(self):
         self.snapshot = 0
         self.vgroups = 4
-        self.ctbNum = 100
+        self.ctbNum = 1000
         self.rowsPerTbl = 10

     def init(self, conn, logSql):
@@ -39,9 +39,9 @@ class TDTestCase:
                     'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                     'ctbPrefix': 'ntb',
                     'ctbStartIdx': 0,
-                    'ctbNum': 100,
-                    'rowsPerTbl': 1000,
-                    'batchNum': 1000,
+                    'ctbNum': 1000,
+                    'rowsPerTbl': 100,
+                    'batchNum': 100,
                     'startTs': 1640966400000, # 2022-01-01 00:00:00.000
                     'endTs': 0,
                     'pollDelay': 5,
@@ -125,9 +125,9 @@ class TDTestCase:
                     'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                     'ctbPrefix': 'ntb',
                     'ctbStartIdx': 0,
-                    'ctbNum': 100,
-                    'rowsPerTbl': 1000,
-                    'batchNum': 1000,
+                    'ctbNum': 1000,
+                    'rowsPerTbl': 100,
+                    'batchNum': 100,
                     'startTs': 1640966400000, # 2022-01-01 00:00:00.000
                     'endTs': 0,
                     'pollDelay': 10,
@@ -203,16 +203,16 @@ class TDTestCase:
         tdLog.printNoPrefix("======== test case 2 end ...... ")

     def run(self):
-        tdLog.printNoPrefix("=============================================")
-        tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
-        self.snapshot = 0
+        # tdLog.printNoPrefix("=============================================")
+        # tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
+        # self.snapshot = 0
         # self.tmqCase1()
-        self.tmqCase2()
+        # self.tmqCase2()

         tdLog.printNoPrefix("====================================================================")
         tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
         self.snapshot = 1
-        # self.tmqCase1()
+        self.tmqCase1()
         self.tmqCase2()

     def stop(self):
@@ -210,7 +210,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
 python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
 python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
 python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
-#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
+python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
 python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
 python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py
 python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
@@ -219,12 +219,14 @@ python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
 python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py
 python3 ./test.py -f 7-tmq/tmqDropStb.py
 python3 ./test.py -f 7-tmq/tmqDropStbCtb.py
-python3 ./test.py -f 7-tmq/tmqDropNtb.py
+python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py
+python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py
 python3 ./test.py -f 7-tmq/tmqUdf.py
 python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
 python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
 python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
+python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
+python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
 # python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py

 #------------querPolicy 2-----------
@@ -1 +1 @@
-Subproject commit 9cfa195713d1cae9edf417a8d49bde87dd971016
+Subproject commit 0b8a3373bb7548f8106d13e7d3b0a988d3c4d48a