Merge pull request #11957 from taosdata/feature/3.0_liaohj
fix(query): enable the limitation on the number of query results
commit b159b0ff12
@@ -358,8 +358,15 @@ SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
   SQuery* pQuery = NULL;
 
   int32_t code = buildRequest(pTscObj, sql, sqlLen, &pRequest);
-  if (TSDB_CODE_SUCCESS == code) {
-    code = parseSql(pRequest, false, &pQuery, NULL);
+  if (code != TSDB_CODE_SUCCESS) {
+    terrno = code;
+    return NULL;
+  }
+
+  code = parseSql(pRequest, false, &pQuery, NULL);
+  if (code != TSDB_CODE_SUCCESS) {
+    pRequest->code = code;
+    return pRequest;
   }
 
   return launchQueryImpl(pRequest, pQuery, code, false);
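With this change, launchQuery distinguishes two failure points instead of funnelling both through launchQueryImpl: if buildRequest fails there is no request object yet, so the error is published through terrno and NULL is returned; if parseSql fails the request already exists, so the error is stored in pRequest->code and the object is handed back for normal cleanup. A minimal standalone sketch of that pattern, with hypothetical names (not the TDengine client API):

#include <stddef.h>

static int g_lastError = 0;            /* stand-in for a thread-local error code such as terrno */

typedef struct Request {
  int code;                            /* per-request error code */
} Request;

/* Sketch of the two-stage error propagation: an early failure (before the
 * request exists) is reported globally and NULL is returned; a late failure
 * is recorded on the request object, which is returned for the caller to
 * inspect and free. */
static Request* launchSketch(Request* storage, int buildCode, int parseCode) {
  if (buildCode != 0) {                /* nothing to hand back yet */
    g_lastError = buildCode;
    return NULL;
  }

  Request* req = storage;              /* the request object now exists */
  req->code = parseCode;               /* 0 on success, error code otherwise */
  return req;
}

Callers therefore have to be prepared for a NULL return, which is exactly what the execQuery change below adds.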
@@ -410,7 +417,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
 
   while (retryNum++ < REQUEST_MAX_TRY_TIMES) {
     pRequest = launchQuery(pTscObj, sql, sqlLen);
-    if (TSDB_CODE_SUCCESS == pRequest->code || !NEED_CLIENT_HANDLE_ERROR(pRequest->code)) {
+    if (pRequest == NULL || TSDB_CODE_SUCCESS == pRequest->code || !NEED_CLIENT_HANDLE_ERROR(pRequest->code)) {
       break;
     }
 
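Because launchQuery can now return NULL, the retry loop in execQuery has to test the pointer before touching pRequest->code; the short-circuit order of the new condition guarantees the dereference is skipped. A standalone sketch of that guard, with hypothetical helpers rather than the real NEED_CLIENT_HANDLE_ERROR / REQUEST_MAX_TRY_TIMES machinery:

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

#define MAX_TRY_TIMES 5

typedef struct Request { int code; } Request;

/* Pretend launcher: a fatal failure on the first call (returns NULL), to show
 * that the loop bails out without dereferencing the missing request. */
static Request* launchOnce(int attempt, Request* slot) {
  if (attempt == 1) return NULL;
  slot->code = 0;
  return slot;
}

static bool isRetryable(int code) { return code != 0; }  /* stand-in for NEED_CLIENT_HANDLE_ERROR */

int main(void) {
  Request  slot;
  Request* req = NULL;
  int      retryNum = 0;

  while (retryNum++ < MAX_TRY_TIMES) {
    req = launchOnce(retryNum, &slot);
    /* NULL is checked first, so req->code is never read through a null pointer */
    if (req == NULL || req->code == 0 || !isRetryable(req->code)) {
      break;
    }
  }

  printf("stopped after %d attempt(s), request %s\n", retryNum, req ? "obtained" : "is NULL");
  return 0;
}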
@@ -4994,13 +4994,20 @@ static int32_t handleLimitOffset(SOperatorInfo* pOperator, SSDataBlock* pBlock)
     pProjectInfo->curOffset = 0;
   }
 
-  if (pRes->info.rows >= pOperator->resultInfo.threshold) {
+  // check for the limitation in each group
+  if (pProjectInfo->limit.limit > 0 && pProjectInfo->curOutput + pRes->info.rows >= pProjectInfo->limit.limit) {
+    pRes->info.rows = (int32_t)(pProjectInfo->limit.limit - pProjectInfo->curOutput);
 
-    // check for the limitation in each group
-    if (pProjectInfo->limit.limit > 0 && pProjectInfo->curOutput + pRes->info.rows >= pProjectInfo->limit.limit) {
-      pRes->info.rows = (int32_t)(pProjectInfo->limit.limit - pProjectInfo->curOutput);
+    if (pProjectInfo->slimit.limit == -1 || pProjectInfo->slimit.limit <= pProjectInfo->curGroupOutput) {
+      pOperator->status = OP_EXEC_DONE;
     }
 
+    return PROJECT_RETRIEVE_DONE;
+  }
+
+  // If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the
+  // they may not belong to the same group the limit/offset value is not valid in this case.
+  if (pRes->info.rows >= pOperator->resultInfo.threshold || pProjectInfo->slimit.offset != -1 || pProjectInfo->slimit.limit != -1) {
     return PROJECT_RETRIEVE_DONE;
   } else {  // not full enough, continue to accumulate the output data in the buffer.
     return PROJECT_RETRIEVE_CONTINUE;
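Previously the LIMIT truncation in handleLimitOffset sat inside the rows >= threshold branch, so a query whose buffered output never reached the operator's internal threshold could return more rows than its LIMIT allowed. The reordered code applies the limit to every outgoing block, truncates the block to the remaining quota, and marks the operator done once the quota is exhausted (unless SLIMIT still requires further groups). A simplified, self-contained sketch of just the per-block limit bookkeeping, ignoring the SLIMIT/group handling (hypothetical types, not the executor's real interface):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t limit;      /* LIMIT value, -1 means unlimited */
  int64_t curOutput;  /* rows already emitted */
} LimitState;

/* Truncate one outgoing block of `rows` rows against the remaining quota.
 * Returns the number of rows to emit and sets *done when the quota is exhausted. */
static int32_t applyLimit(LimitState* st, int32_t rows, bool* done) {
  *done = false;
  if (st->limit > 0 && st->curOutput + rows >= st->limit) {
    rows = (int32_t)(st->limit - st->curOutput);   /* keep only the remaining quota */
    *done = true;                                  /* nothing more to produce */
  }
  st->curOutput += rows;
  return rows;
}

int main(void) {
  LimitState st = {.limit = 5, .curOutput = 0};
  int32_t blocks[] = {3, 3, 3};                    /* incoming block sizes */
  for (int i = 0; i < 3; ++i) {
    bool done = false;
    int32_t out = applyLimit(&st, blocks[i], &done);
    printf("block %d: emit %d rows%s\n", i, out, done ? " (limit reached)" : "");
    if (done) break;
  }
  return 0;
}

Running the sketch with limit = 5 and incoming blocks of 3, 3, 3 rows emits 3 + 2 rows and then stops, which is the behaviour the commit title refers to.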
@@ -1069,7 +1069,6 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
 
   if (TSDB_QUERY_HAS_TYPE(pCxt->pOutput->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && tbNum > 0) {
     return buildInvalidOperationMsg(&pCxt->msg, "single table allowed in one stmt");
-    ;
   }
 
   destroyInsertParseContextForTable(pCxt);