Merge pull request #19915 from taosdata/fix/nodisk
enh(query): dynamically set the initial buffer size for data block from d…
commit e378668571
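In short: the reader no longer defaults to a hard-coded 4096-row block capped at 2 MB; it takes the initial block capacity from the vnode's tsdb configuration. A minimal sketch of the new logic in tsdbReaderOpen, distilled from the hunks below (surrounding code and error handling omitted):

  // capacity now comes from the per-vnode tsdb config instead of a fixed 4096 rows
  int32_t capacity = pVnode->config.tsdbCfg.maxRows;
  if (pResBlock != NULL) {
    // make sure the caller's result block can hold a full data block
    blockDataEnsureCapacity(pResBlock, capacity);
  }
  int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr);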
@@ -423,19 +423,6 @@ static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) {
   return win;
 }

-static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* capacity) {
-  int32_t rowLen = 0;
-  for (int32_t i = 0; i < pCond->numOfCols; ++i) {
-    rowLen += pCond->colList[i].bytes;
-  }
-
-  // make sure the output SSDataBlock size be less than 2MB.
-  const int32_t TWOMB = 2 * 1024 * 1024;
-  if ((*capacity) * rowLen > TWOMB) {
-    (*capacity) = TWOMB / rowLen;
-  }
-}
-
 // init file iterator
 static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) {
   size_t numOfFileset = taosArrayGetSize(aDFileSet);
@@ -618,9 +605,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
     goto _end;
   }

-  // todo refactor.
-  limitOutputBufferSize(pCond, &pReader->capacity);
-
   // allocate buffer in order to load data blocks from file
   SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
   pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg));
@@ -3835,11 +3819,9 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
     pCond->twindows.ekey -= 1;
   }

-  int32_t capacity = 0;
-  if (pResBlock == NULL) {
-    capacity = 4096;
-  } else {
-    capacity = pResBlock->info.capacity;
+  int32_t capacity = pVnode->config.tsdbCfg.maxRows;
+  if (pResBlock != NULL) {
+    blockDataEnsureCapacity(pResBlock, capacity);
   }

   int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr);
@@ -781,6 +781,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
     if (code != TSDB_CODE_SUCCESS) {
       T_LONG_JMP(pTaskInfo->env, code);
     }
+
+    if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) {
+      pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity;
+    }
   }

   SSDataBlock* result = doGroupedTableScan(pOperator);
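The operator-side hunks keep the two capacities in sync: doTableScan raises pOperator->resultInfo.capacity whenever the reader's scan block is larger, and the hunk below comments out the eager pre-sizing of pResBlock in createTableScanOperatorInfo, leaving the block to grow to the reader's dynamically chosen capacity.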
@@ -884,7 +888,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,

   initResultSizeInfo(&pOperator->resultInfo, 4096);
   pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
-  blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);
+  // blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);

   code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
   if (code != TSDB_CODE_SUCCESS) {