Merge pull request #2322 from taosdata/bugfix/client-coverity
TD-617: fix some coverity issues
commit c83f9708b9
@@ -151,8 +151,8 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_dumpMemoryLeakImp
 JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *env, jobject jobj, jstring jconfigDir) {
   if (jconfigDir != NULL) {
     const char *confDir = (*env)->GetStringUTFChars(env, jconfigDir, NULL);
-    if (confDir && strlen(configDir) != 0) {
-      strcpy(configDir, confDir);
+    if (confDir && strlen(confDir) != 0) {
+      tstrncpy(configDir, confDir, TSDB_FILENAME_LEN);
     }
     (*env)->ReleaseStringUTFChars(env, jconfigDir, confDir);
   }
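Note: the fix above does two things: the length check now tests the source string (confDir) rather than the destination buffer, and the unbounded strcpy into the fixed-size configDir buffer becomes a length-bounded copy. A minimal sketch of the idea in plain C, assuming tstrncpy behaves like a bounded copy that always NUL-terminates; the helper name and buffer size below are illustrative, not TDengine's:

    #include <stdio.h>
    #include <string.h>

    #define CONF_DIR_LEN 16                /* illustrative size, not TSDB_FILENAME_LEN */

    static char configDir[CONF_DIR_LEN];

    /* Hypothetical bounded copy: never writes past dst[n-1], always NUL-terminates. */
    static void safe_copy(char *dst, const char *src, size_t n) {
      if (n == 0) return;
      strncpy(dst, src, n - 1);
      dst[n - 1] = '\0';
    }

    int main(void) {
      const char *confDir = "/a/path/that/is/longer/than/the/buffer";

      /* strcpy(configDir, confDir) would overflow configDir here. */
      if (confDir && strlen(confDir) != 0) {
        safe_copy(configDir, confDir, sizeof(configDir));
      }
      printf("configDir = \"%s\"\n", configDir);   /* truncated but in-bounds */
      return 0;
    }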
@@ -385,7 +385,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
   }
 
   jint ret = taos_affected_rows((SSqlObj *)res);
-  jniTrace("jobj:%p, conn:%p, sql:%p, affect rows:%d", jobj, tscon, (void *)con, res, ret);
+  jniTrace("jobj:%p, conn:%p, sql:%p, affect rows:%d", jobj, tscon, (SSqlObj*)res, ret);
 
   return ret;
 }
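The removed trace call passes five arguments for four conversion specifiers; the replacement passes exactly one argument per specifier, and later hunks in this commit additionally cast pointers to void* for %p, which is what the standard requires. A small sketch using plain printf in place of jniTrace, which compiles cleanly under gcc -Wall -Wformat (the struct is a stand-in, not the real SSqlObj):

    #include <stdio.h>

    typedef struct SqlObjLike { int dummy; } SqlObjLike;   /* stand-in type */

    int main(void) {
      SqlObjLike obj = { 0 };
      SqlObjLike *res = &obj;
      int affected = 3;

      /* Wrong: three arguments for two specifiers, and passing a struct pointer
       * to %p without a cast is technically undefined behaviour:
       *   printf("res:%p rows:%d\n", res, res, affected);
       */

      /* Right: one argument per specifier, pointer cast to void * for %p. */
      printf("res:%p rows:%d\n", (void *)res, affected);
      return 0;
    }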
@@ -411,10 +411,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData
   // jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, "");
 
   if (num_fields == 0) {
-    jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields);
+    jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields);
     return JNI_NUM_OF_FIELDS_0;
   } else {
-    jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields);
+    jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields);
     for (int i = 0; i < num_fields; ++i) {
       jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp);
       (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type);
@@ -465,7 +465,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
   int num_fields = taos_num_fields(result);
 
   if (num_fields == 0) {
-    jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields);
+    jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields);
     return JNI_NUM_OF_FIELDS_0;
   }
 
@@ -473,7 +473,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
   if (row == NULL) {
     int tserrno = taos_errno(result);
     if (tserrno == 0) {
-      jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, res, num_fields);
+      jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, num_fields);
       return JNI_FETCH_END;
     } else {
       jniTrace("jobj:%p, conn:%p, interruptted query", jobj, tscon);
@@ -571,9 +571,9 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNI
   sub = (jlong)tsub;
 
   if (sub == 0) {
-    jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, jtopic);
+    jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, topic);
   } else {
-    jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, jtopic);
+    jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, topic);
   }
 
   if (topic != NULL) (*env)->ReleaseStringUTFChars(env, jtopic, topic);
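This hunk logs the extracted C string (topic) instead of the jstring handle (jtopic), which %s cannot print. The surrounding code already follows the usual JNI pairing of GetStringUTFChars with ReleaseStringUTFChars; a self-contained sketch of that pattern, with a hypothetical native method name:

    #include <jni.h>
    #include <stdio.h>

    /* Hypothetical native method; the class and method names are illustrative. */
    JNIEXPORT void JNICALL Java_example_Demo_logTopic(JNIEnv *env, jobject jobj, jstring jtopic) {
      /* Copy the Java string into a C string that %s can print. */
      const char *topic = (*env)->GetStringUTFChars(env, jtopic, NULL);
      if (topic == NULL) {
        return;  /* OutOfMemoryError has already been thrown */
      }

      printf("jobj:%p, topic:%s\n", (void *)jobj, topic);

      /* Every successful GetStringUTFChars needs a matching release. */
      (*env)->ReleaseStringUTFChars(env, jtopic, topic);
    }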
@@ -583,7 +583,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNI
 }
 
 JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) {
-  jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub);
+  jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%" PRId64, jobj, sub);
   jniGetGlobalMethod(env);
 
   TAOS_SUB *tsub = (TAOS_SUB *)sub;
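jlong is 64 bits, so %ld is only correct where long happens to be 64 bits; the fixed call uses PRId64 from <inttypes.h>, which expands to the right specifier on every platform:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
      int64_t sub = 1234567890123LL;

      /* Portable: PRId64 expands to "ld" or "lld" as the platform requires. */
      printf("sub:%" PRId64 "\n", sub);
      return 0;
    }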
@@ -285,7 +285,7 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
 
     tscProcessSql(pSql);
   } else {
-    SSchedMsg schedMsg;
+    SSchedMsg schedMsg = { 0 };
     schedMsg.fp = tscProcessFetchRow;
     schedMsg.ahandle = pSql;
     schedMsg.thandle = pRes->tsrow;
@@ -387,7 +387,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
   int32_t* c = malloc(sizeof(int32_t));
   *c = code;
 
-  SSchedMsg schedMsg;
+  SSchedMsg schedMsg = { 0 };
   schedMsg.fp = tscProcessAsyncError;
   schedMsg.ahandle = fp;
   schedMsg.thandle = param;
@@ -403,7 +403,7 @@ void tscQueueAsyncRes(SSqlObj *pSql) {
     tscError("%p add into queued async res, code:%s", pSql, tstrerror(pSql->res.code));
   }
 
-  SSchedMsg schedMsg;
+  SSchedMsg schedMsg = { 0 };
   schedMsg.fp = tscProcessAsyncRes;
   schedMsg.ahandle = pSql;
   schedMsg.thandle = (void *)1;
@@ -420,7 +420,7 @@ void tscProcessAsyncFree(SSchedMsg *pMsg) {
 void tscQueueAsyncFreeResult(SSqlObj *pSql) {
   tscTrace("%p sqlObj put in queue to async free", pSql);
 
-  SSchedMsg schedMsg;
+  SSchedMsg schedMsg = { 0 };
   schedMsg.fp = tscProcessAsyncFree;
   schedMsg.ahandle = pSql;
   schedMsg.thandle = (void *)1;
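The three hunks above all change SSchedMsg schedMsg; to SSchedMsg schedMsg = { 0 };. Only some fields are assigned before the message is handed to the scheduler, so the initializer guarantees the remaining fields are zero instead of indeterminate, the kind of uninitialized read Coverity reports. A generic sketch (FakeMsg is a stand-in, not the real SSchedMsg layout):

    #include <stdio.h>

    typedef struct FakeMsg {
      void (*fp)(struct FakeMsg *);
      void *ahandle;
      void *thandle;
      void *msg;            /* never assigned below */
    } FakeMsg;

    static void handler(FakeMsg *m) { (void)m; }

    int main(void) {
      FakeMsg a;            /* a.msg is indeterminate: reading it is undefined  */
      FakeMsg b = { 0 };    /* every field, including b.msg, starts out zeroed  */

      b.fp = handler;
      b.ahandle = NULL;
      b.thandle = NULL;

      (void)a;
      printf("b.msg == NULL ? %d\n", b.msg == NULL);
      return 0;
    }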
@@ -1853,26 +1853,14 @@ static void last_row_function(SQLFunctionCtx *pCtx) {
 static void last_row_finalizer(SQLFunctionCtx *pCtx) {
   // do nothing at the first stage
   SResultInfo *pResInfo = GET_RES_INFO(pCtx);
-  if (pCtx->currentStage == SECONDARY_STAGE_MERGE) {
-    if (pResInfo->hasResult != DATA_SET_FLAG) {
-      if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
-        setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
-      } else {
-        setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
-      }
-
-      return;
-    }
-  } else {
-    if (pResInfo->hasResult != DATA_SET_FLAG) {
-      if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
-        setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
-      } else {
-        setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
-      }
-
-      return;
-    }
-  }
+  if (pResInfo->hasResult != DATA_SET_FLAG) {
+    if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
+      setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
+    } else {
+      setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
+    }
+
+    return;
+  }
 
   GET_RES_INFO(pCtx)->numOfRes = 1;
@@ -252,7 +252,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
       numType = tscToInteger(pToken, &iv, &endptr);
       if (TK_ILLEGAL == numType) {
         return tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z);
-      } else if (errno == ERANGE || iv > INT64_MAX || iv <= INT64_MIN) {
+      } else if (errno == ERANGE || iv == INT64_MIN) {
         return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z);
       }
 
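For a 64-bit iv, the removed conditions could never fire: iv > INT64_MAX is always false, and iv <= INT64_MIN only holds for the single value INT64_MIN, which the new check keeps. The overflow signal that actually matters is errno == ERANGE from the underlying strtoll-style parse (assuming tscToInteger wraps such a parse). A standalone illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      const char *tok = "99999999999999999999";   /* does not fit in a 64-bit integer */
      char *end = NULL;

      errno = 0;
      long long iv = strtoll(tok, &end, 10);

      /* On overflow strtoll clamps to LLONG_MAX/LLONG_MIN and sets errno to ERANGE.
       * A value that already has a 64-bit type can never exceed its own maximum, so
       * a check like iv > INT64_MAX is dead code; the errno test is the real guard. */
      if (errno == ERANGE) {
        printf("bigint data overflow: %s\n", tok);
      } else {
        printf("parsed %lld\n", iv);
      }
      return 0;
    }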
@@ -594,7 +594,6 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
   size_t remain = pDataBlock->nAllocSize - pDataBlock->size;
   const int factor = 5;
   uint32_t nAllocSizeOld = pDataBlock->nAllocSize;
-  assert(pDataBlock->headerSize >= 0);
 
   // expand the allocated size
   if (remain < rowSize * factor) {
@@ -1445,7 +1445,7 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
   }
 
   if (aliasName != NULL) {
-    strcpy(columnName, aliasName);
+    tstrncpy(columnName, aliasName, sizeof(columnName));
   } else {
     getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
   }
@@ -2221,7 +2221,6 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   // db prefix in tagCond, show table conds in payload
   SSQLToken* pDbPrefixToken = &pShowInfo->prefix;
   if (pDbPrefixToken->type != 0) {
-    assert(pDbPrefixToken->n >= 0);
 
     if (pDbPrefixToken->n >= TSDB_DB_NAME_LEN) { // db name is too long
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
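This hunk and the earlier tscAllocateMemIfNeed hunk simply drop asserts of the form assert(x >= 0). If the operand is unsigned (assumed here; the field types are not visible in the diff), the comparison is always true, so the assert can never fire and analyzers flag it as having no effect. For example:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint32_t headerSize = 0;   /* assumed-unsigned stand-in for the real field */

      /* Always true for an unsigned operand; gcc -Wall -Wextra -Wtype-limits warns
       * that the comparison of an unsigned expression >= 0 is always true.        */
      assert(headerSize >= 0);

      return (int)headerSize;
    }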
@@ -4765,7 +4764,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
   pQueryInfo->clauseLimit = pQueryInfo->limit.limit;
   pQueryInfo->slimit = pQuerySql->slimit;
 
-  tscTrace("%p limit:%d, offset:%" PRId64 " slimit:%d, soffset:%" PRId64, pSql, pQueryInfo->limit.limit,
+  tscTrace("%p limit:%" PRId64 ", offset:%" PRId64 " slimit:%" PRId64 ", soffset:%" PRId64, pSql, pQueryInfo->limit.limit,
            pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset);
 
   if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) {
@@ -5255,10 +5254,10 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
 
   for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
     SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i);
-
+    SSchema s;
     int16_t colIndex = pColIndex->colIndex;
     if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
-      SSchema s = tGetTableNameColumnSchema();
+      s = tGetTableNameColumnSchema();
       type = s.type;
       bytes = s.bytes;
       name = s.name;
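Moving the SSchema declaration out of the if block appears to be a lifetime fix: name is set to s.name, a pointer into s, and that pointer is used after the block, so s must live in the enclosing scope. A reduced sketch of the pattern (SchemaLike and the values below are illustrative):

    #include <stdio.h>
    #include <string.h>

    typedef struct SchemaLike {
      char name[32];
      int  type;
    } SchemaLike;

    static SchemaLike make_schema(void) {
      SchemaLike s;
      memset(&s, 0, sizeof(s));
      strcpy(s.name, "tbname");
      s.type = 8;
      return s;
    }

    int main(void) {
      const char *name = NULL;

      SchemaLike s;                 /* declared in the enclosing scope ...        */
      if (1) {
        s = make_schema();          /* ... so the object is still alive ...       */
        name = s.name;
      }
      printf("%s\n", name);         /* ... when the stored pointer is used here.  */

      /* Had s been declared inside the if block, s.name would point into an
       * object whose lifetime ended at the closing brace: a dangling pointer
       * that static analysis flags. */
      return 0;
    }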
@@ -230,6 +230,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   if (ds == NULL) {
     tscError("%p failed to create merge structure", pSql);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+    tfree(pReducer);
     return;
   }
 
@@ -266,6 +267,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 
   // no data actually, no need to merge result.
   if (idx == 0) {
+    tfree(pReducer);
     return;
   }
 
@@ -282,6 +284,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
 
   pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
   if (pReducer->pLoserTree == NULL || pRes->code != 0) {
+    tfree(pReducer);
     return;
   }
 
@@ -325,7 +328,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
     tfree(pReducer->pResultBuf);
     tfree(pReducer->pFinalRes);
     tfree(pReducer->prevRowOfInput);
 
     tfree(pReducer);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return;
   }
@@ -685,6 +688,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
 
   if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) {
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+    tfree(pSchema);
     return pRes->code;
   }
 
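The remaining hunks all add a tfree call on an early-return error path in tscCreateLocalReducer / tscLocalReducerEnvCreate, so partially built state is released rather than leaked. One common way to keep such paths consistent is a single cleanup label; a minimal sketch with a hypothetical free-and-clear macro in the spirit of tfree:

    #include <stdlib.h>

    /* Hypothetical helper in the spirit of tfree: free and clear the pointer. */
    #define FREE_CLEAR(p) do { free(p); (p) = NULL; } while (0)

    typedef struct Reducer {
      char *buf;
      char *rows;
    } Reducer;

    static Reducer *create_reducer(size_t n) {
      Reducer *r = calloc(1, sizeof(*r));
      if (r == NULL) return NULL;

      r->buf = malloc(n);
      if (r->buf == NULL) goto fail;       /* early failure: nothing else allocated */

      r->rows = malloc(n * 2);
      if (r->rows == NULL) goto fail;      /* later failure: buf released below     */

      return r;

    fail:
      FREE_CLEAR(r->buf);                  /* free(NULL) is a no-op, so this is safe */
      FREE_CLEAR(r->rows);
      FREE_CLEAR(r);                       /* mirrors the added tfree(pReducer)      */
      return NULL;
    }

    int main(void) {
      Reducer *r = create_reducer(64);
      if (r != NULL) {
        FREE_CLEAR(r->rows);
        FREE_CLEAR(r->buf);
        FREE_CLEAR(r);
      }
      return 0;
    }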