Merge pull request #20204 from taosdata/fix/3_coverity
fix(query): fix coverity issue.
commit a3ffddba2d
@@ -916,8 +916,10 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockN
       pBlockNum->numOfBlocks += 1;
     }

-    if ((pScanInfo->pBlockList != NULL )&& (taosArrayGetSize(pScanInfo->pBlockList) > 0)) {
-      numOfQTable += 1;
-    }
+    if (pScanInfo->pBlockList != NULL) {
+      if (taosArrayGetSize(pScanInfo->pBlockList) > 0) {
+        numOfQTable += 1;
+      }
+    }
   }

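The hunk above presumably addresses a forward-NULL style finding: the old compound condition already tested the pointer, but splitting it into a nested check makes the NULL guard explicit before the size call. A standalone sketch of the resulting shape, using stand-in types rather than the real SArray API:

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for SArray; only the size matters for this sketch. */
typedef struct { size_t size; } SArrayStub;

static size_t arrayGetSizeStub(const SArrayStub* p) { return p->size; }

/* Mirrors the new nested form: NULL guard first, then the size check. */
static bool hasQualifiedBlocks(const SArrayStub* pBlockList) {
  if (pBlockList != NULL) {
    if (arrayGetSizeStub(pBlockList) > 0) {
      return true;   /* caller would do numOfQTable += 1 here */
    }
  }
  return false;
}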
@@ -4720,8 +4722,13 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
   return code;
 }

-static int32_t getBucketIndex(int32_t startRow, int32_t bucketRange, int32_t numOfRows) {
-  return (numOfRows - startRow) / bucketRange;
+static int32_t getBucketIndex(int32_t startRow, int32_t bucketRange, int32_t numOfRows, int32_t numOfBucket) {
+  int32_t bucketIndex = ((numOfRows - startRow) / bucketRange);
+  if (bucketIndex == numOfBucket) {
+    bucketIndex -= 1;
+  }
+
+  return bucketIndex;
 }

 int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTableBlockInfo) {

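getBucketIndex() now receives the bucket count so it can clamp the computed index. The boundary it guards against: when (numOfRows - startRow) is an exact multiple of bucketRange, the raw index equals numOfBucket and would step one slot past the end of blockRowsHisto. A self-contained sketch with hypothetical numbers:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static int32_t getBucketIndexSketch(int32_t startRow, int32_t bucketRange, int32_t numOfRows,
                                    int32_t numOfBucket) {
  int32_t bucketIndex = (numOfRows - startRow) / bucketRange;
  if (bucketIndex == numOfBucket) {
    bucketIndex -= 1;   /* fold the inclusive upper edge into the last bucket */
  }
  return bucketIndex;
}

int main(void) {
  const int32_t numOfBucket = 20;
  int32_t minRows = 10, maxRows = 4010;                            /* hypothetical config */
  int32_t bucketRange = (int32_t)ceil((maxRows - minRows) / 20.0); /* 200 */
  /* A block holding exactly maxRows rows maps to bucket 19, not 20. */
  printf("%d\n", getBucketIndexSketch(minRows, bucketRange, maxRows, numOfBucket));
  return 0;
}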
@@ -4730,6 +4737,8 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
   pTableBlockInfo->totalRows = 0;
   pTableBlockInfo->numOfVgroups = 1;

+  const int32_t numOfBuckets = 20.0;
+
   // find the start data block in file

   tsdbAcquireReader(pReader);
@@ -4742,7 +4751,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
   pTableBlockInfo->defMinRows = pc->minRows;
   pTableBlockInfo->defMaxRows = pc->maxRows;

-  int32_t bucketRange = ceil((pc->maxRows - pc->minRows) / 20.0);
+  int32_t bucketRange = ceil((pc->maxRows - pc->minRows) / numOfBuckets);

   pTableBlockInfo->numOfFiles += 1;

@@ -4780,7 +4789,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa

       pTableBlockInfo->totalSize += pBlock->aSubBlock[0].szBlock;

-      int32_t bucketIndex = getBucketIndex(pTableBlockInfo->defMinRows, bucketRange, numOfRows);
+      int32_t bucketIndex = getBucketIndex(pTableBlockInfo->defMinRows, bucketRange, numOfRows, numOfBuckets);
       pTableBlockInfo->blockRowsHisto[bucketIndex]++;

       hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr);

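Taken together, the three hunks above define the bucket count once (numOfBuckets), derive bucketRange from it, and pass it down so the histogram index stays in range. A compact sketch of that flow with illustrative values only (note that blockDistFinalize, further below, divides by numOfBuckets as an integer):

#include <stdint.h>
#include <stdio.h>

#define NUM_OF_BUCKETS 20

static int32_t bucketIndexSketch(int32_t startRow, int32_t bucketRange, int32_t numOfRows) {
  int32_t idx = (numOfRows - startRow) / bucketRange;
  return (idx == NUM_OF_BUCKETS) ? idx - 1 : idx;   /* same clamp as above */
}

int main(void) {
  int32_t defMinRows = 10, defMaxRows = 4010;                       /* hypothetical */
  int32_t bucketRange = (defMaxRows - defMinRows) / NUM_OF_BUCKETS; /* 200 */
  int32_t blockRowsHisto[NUM_OF_BUCKETS] = {0};

  int32_t blockRows[] = {10, 500, 2048, 4010};   /* made-up per-block row counts */
  for (size_t i = 0; i < sizeof(blockRows) / sizeof(blockRows[0]); ++i) {
    blockRowsHisto[bucketIndexSketch(defMinRows, bucketRange, blockRows[i])]++;
  }
  for (int32_t i = 0; i < NUM_OF_BUCKETS; ++i) {
    printf("%04d | %d\n", defMinRows + bucketRange * (i + 1), blockRowsHisto[i]);
  }
  return 0;
}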
@@ -37,10 +37,12 @@ int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHand
   switch ((int)nodeType(pDataSink)) {
     case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
       return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle);
-    case QUERY_NODE_PHYSICAL_PLAN_DELETE:
+    case QUERY_NODE_PHYSICAL_PLAN_DELETE: {
       return createDataDeleter(&gDataSinkManager, pDataSink, pHandle, pParam);
-    case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
+    }
+    case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT: {
       return createDataInserter(&gDataSinkManager, pDataSink, pHandle, pParam);
+    }
   }

   qError("invalid input node type:%d, %s", nodeType(pDataSink), id);

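Each case body in dsCreateDataSinker now has its own braces. Since every case returns immediately, behavior does not change; the braces give each body a distinct block scope, which is presumably what the analyzer wanted to see. A sketch of the shape with stub creators (all names here are stand-ins):

#include <stdio.h>

enum { PLAN_DISPATCH, PLAN_DELETE, PLAN_QUERY_INSERT };

static int createDispatcherStub(void) { return 0; }
static int createDeleterStub(void)    { return 0; }
static int createInserterStub(void)   { return 0; }

static int createSinkerSketch(int nodeType) {
  switch (nodeType) {
    case PLAN_DISPATCH:
      return createDispatcherStub();
    case PLAN_DELETE: {                  /* braced body, returns directly */
      return createDeleterStub();
    }
    case PLAN_QUERY_INSERT: {
      return createInserterStub();
    }
  }
  fprintf(stderr, "invalid input node type:%d\n", nodeType);
  return -1;
}

int main(void) { return createSinkerSketch(PLAN_DELETE); }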
@@ -1996,6 +1996,11 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode);

 int32_t extractTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNode, SExecTaskInfo* pTaskInfo) {
   SMetaReader mr = {0};
+  if (pHandle == NULL) {
+    terrno = TSDB_CODE_INVALID_PARA;
+    return terrno;
+  }
+
   metaReaderInit(&mr, pHandle->meta, 0);
   int32_t code = metaGetTableEntryByUidCache(&mr, pScanNode->uid);
   if (code != TSDB_CODE_SUCCESS) {

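extractTableSchemaInfo() now rejects a NULL read handle before metaReaderInit() would dereference pHandle->meta, reporting TSDB_CODE_INVALID_PARA through terrno. A minimal sketch of the guard with stand-in types (the error value and names below are placeholders, not the real definitions):

#include <stddef.h>

#define TSDB_CODE_INVALID_PARA_SKETCH (-1)   /* placeholder, not the real error value */

static int terrno_sketch;                     /* stand-in for the thread-local terrno */

typedef struct { void* meta; } SReadHandleStub;

static int extractSchemaSketch(SReadHandleStub* pHandle) {
  if (pHandle == NULL) {                      /* fail fast instead of dereferencing NULL */
    terrno_sketch = TSDB_CODE_INVALID_PARA_SKETCH;
    return terrno_sketch;
  }
  /* ... a metaReaderInit-style lookup would use pHandle->meta from here on ... */
  return 0;
}

int main(void) {
  SReadHandleStub h = { .meta = NULL };
  return extractSchemaSketch(&h);   /* returns 0; extractSchemaSketch(NULL) sets terrno_sketch */
}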
@@ -5579,7 +5579,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   int32_t bucketRange = (pData->defMaxRows - pData->defMinRows) / numOfBuckets;

   for (int32_t i = 0; i < tListLen(pData->blockRowsHisto); ++i) {
-    len = sprintf(st + VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * i);
+    len = sprintf(st + VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * (i + 1));

     int32_t num = 0;
     if (pData->blockRowsHisto[i] > 0) {

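The label change in blockDistFinalize() switches each histogram row's prefix from the lower edge of its bucket to the upper edge, presumably so the printed boundaries match the bucket contents. A small sketch with made-up values:

#include <stdio.h>

int main(void) {
  const int numOfBuckets = 20;
  int defMinRows = 100, defMaxRows = 4096;                       /* made-up values */
  int bucketRange = (defMaxRows - defMinRows) / numOfBuckets;    /* integer division, as in the diff */
  for (int i = 0; i < numOfBuckets; ++i) {
    /* label is now the upper bound of bucket i: defMinRows + bucketRange * (i + 1) */
    printf("%04d |\n", defMinRows + bucketRange * (i + 1));
  }
  return 0;
}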