Merge branch 'develop' into filterComboTest
This commit is contained in:
commit
daed646d4c
|
@ -53,11 +53,7 @@ typedef struct STableComInfo {
|
|||
} STableComInfo;
|
||||
|
||||
typedef struct STableMeta {
|
||||
// super table if it is created according to super table, otherwise, tableInfo is used
|
||||
union {
|
||||
struct STableMeta *pSTable;
|
||||
STableComInfo tableInfo;
|
||||
};
|
||||
uint8_t tableType;
|
||||
int16_t sversion;
|
||||
SCMVgroupInfo vgroupInfo;
|
||||
|
@ -214,7 +210,7 @@ typedef struct SQueryInfo {
|
|||
int16_t numOfTables;
|
||||
STableMetaInfo **pTableMetaInfo;
|
||||
struct STSBuf * tsBuf;
|
||||
int64_t * defaultVal; // default value for interpolation
|
||||
int64_t * fillVal; // default value for interpolation
|
||||
char * msg; // pointer to the pCmd->payload to keep error message temporarily
|
||||
int64_t clauseLimit; // limit for current sub clause
|
||||
|
||||
|
|
|
@ -390,16 +390,16 @@ static void function_finalizer(SQLFunctionCtx *pCtx) {
|
|||
|
||||
if (pResInfo->hasResult != DATA_SET_FLAG) {
|
||||
tscTrace("no result generated, result is set to NULL");
|
||||
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
}
|
||||
|
||||
doFinalizer(pCtx);
|
||||
}
|
||||
|
||||
static bool usePreVal(SQLFunctionCtx *pCtx) {
|
||||
return pCtx->preAggVals.isSet && pCtx->size == pCtx->preAggVals.size;
|
||||
}
|
||||
|
||||
/*
|
||||
* count function does need the finalize, if data is missing, the default value, which is 0, is used
|
||||
* count function does not use the pCtx->interResBuf to keep the intermediate buffer
|
||||
|
@ -412,7 +412,7 @@ static void count_function(SQLFunctionCtx *pCtx) {
|
|||
* 2. for general non-primary key columns, pCtx->hasNull may be true or false, pCtx->preAggVals.isSet == true;
|
||||
* 3. for primary key column, pCtx->hasNull always be false, pCtx->preAggVals.isSet == false;
|
||||
*/
|
||||
if (usePreVal(pCtx)) {
|
||||
if (pCtx->preAggVals.isSet) {
|
||||
numOfElem = pCtx->size - pCtx->preAggVals.statis.numOfNull;
|
||||
} else {
|
||||
if (pCtx->hasNull) {
|
||||
|
@ -537,7 +537,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
|
|||
int32_t notNullElems = 0;
|
||||
|
||||
// Only the pre-computing information loaded and actual data does not loaded
|
||||
if (pCtx->preAggVals.isSet && pCtx->preAggVals.size == pCtx->size) {
|
||||
if (pCtx->preAggVals.isSet) {
|
||||
notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
|
||||
assert(pCtx->size >= pCtx->preAggVals.statis.numOfNull);
|
||||
|
||||
|
@ -768,7 +768,7 @@ static void avg_function(SQLFunctionCtx *pCtx) {
|
|||
SAvgInfo *pAvgInfo = (SAvgInfo *)pResInfo->interResultBuf;
|
||||
double * pVal = &pAvgInfo->sum;
|
||||
|
||||
if (usePreVal(pCtx)) {
|
||||
if (pCtx->preAggVals.isSet) {
|
||||
// Pre-aggregation
|
||||
notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
|
||||
assert(notNullElems >= 0);
|
||||
|
@ -932,7 +932,7 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
|
|||
|
||||
static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, int32_t *notNullElems) {
|
||||
// data in current data block are qualified to the query
|
||||
if (usePreVal(pCtx)) {
|
||||
if (pCtx->preAggVals.isSet) {
|
||||
*notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
|
||||
assert(*notNullElems >= 0);
|
||||
|
||||
|
@ -947,6 +947,8 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
|
|||
index = pCtx->preAggVals.statis.maxIndex;
|
||||
}
|
||||
|
||||
TSKEY key = TSKEY_INITIAL_VAL;
|
||||
if (pCtx->ptsList != NULL) {
|
||||
/**
|
||||
* NOTE: work around the bug caused by invalid pre-calculated function.
|
||||
* Here the selectivity + ts will not return correct value.
|
||||
|
@ -957,7 +959,8 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
|
|||
index = 0;
|
||||
}
|
||||
|
||||
TSKEY key = pCtx->ptsList[index];
|
||||
key = pCtx->ptsList[index];
|
||||
}
|
||||
|
||||
if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) {
|
||||
int64_t val = GET_INT64_VAL(tval);
|
||||
|
@ -1865,12 +1868,22 @@ static void last_row_finalizer(SQLFunctionCtx *pCtx) {
|
|||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
if (pCtx->currentStage == SECONDARY_STAGE_MERGE) {
|
||||
if (pResInfo->hasResult != DATA_SET_FLAG) {
|
||||
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (pResInfo->hasResult != DATA_SET_FLAG) {
|
||||
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -2886,7 +2899,12 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
|
|||
SLeastsquareInfo *pInfo = pResInfo->interResultBuf;
|
||||
|
||||
if (pInfo->num == 0) {
|
||||
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2913,10 +2931,6 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
|
||||
static void date_col_output_function(SQLFunctionCtx *pCtx) {
|
||||
if (pCtx->scanFlag == REVERSE_SCAN) {
|
||||
return;
|
||||
}
|
||||
|
||||
SET_VAL(pCtx, pCtx->size, 1);
|
||||
*(int64_t *)(pCtx->aOutputBuf) = pCtx->nStartQueryTimestamp;
|
||||
}
|
||||
|
@ -3081,7 +3095,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
|
|||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
} else {
|
||||
*pOutput = pData[i] - pData[i - step];
|
||||
*pOutput = pData[i] - pCtx->param[1].i64Key; // direct previous may be null
|
||||
*pTimestamp = pCtx->ptsList[i];
|
||||
|
||||
pOutput += 1;
|
||||
|
@ -3113,7 +3127,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
|
|||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
} else {
|
||||
*pOutput = pData[i] - pData[i - step];
|
||||
*pOutput = pData[i] - pCtx->param[1].i64Key;
|
||||
*pTimestamp = pCtx->ptsList[i];
|
||||
|
||||
pOutput += 1;
|
||||
|
@ -3144,7 +3158,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
|
|||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
} else {
|
||||
*pOutput = pData[i] - pData[i - step];
|
||||
*pOutput = pData[i] - pCtx->param[1].dKey;
|
||||
*pTimestamp = pCtx->ptsList[i];
|
||||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
|
@ -3175,7 +3189,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
|
|||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
} else {
|
||||
*pOutput = pData[i] - pData[i - step];
|
||||
*pOutput = pData[i] - pCtx->param[1].dKey;
|
||||
*pTimestamp = pCtx->ptsList[i];
|
||||
|
||||
pOutput += 1;
|
||||
|
@ -3207,7 +3221,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
|
|||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
} else {
|
||||
*pOutput = pData[i] - pData[i - step];
|
||||
*pOutput = pData[i] - pCtx->param[1].i64Key;
|
||||
*pTimestamp = pCtx->ptsList[i];
|
||||
|
||||
pOutput += 1;
|
||||
|
@ -3239,7 +3253,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
|
|||
pOutput += 1;
|
||||
pTimestamp += 1;
|
||||
} else {
|
||||
*pOutput = pData[i] - pData[i - step];
|
||||
*pOutput = pData[i] - pCtx->param[1].i64Key;
|
||||
*pTimestamp = pCtx->ptsList[i];
|
||||
|
||||
pOutput += 1;
|
||||
|
@ -3420,7 +3434,7 @@ static void spread_function(SQLFunctionCtx *pCtx) {
|
|||
|
||||
// todo : opt with pre-calculated result
|
||||
// column missing cause the hasNull to be true
|
||||
if (usePreVal(pCtx)) {
|
||||
if (pCtx->preAggVals.isSet) {
|
||||
numOfElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
|
||||
|
||||
// all data are null in current data block, ignore current data block
|
||||
|
@ -3446,14 +3460,8 @@ static void spread_function(SQLFunctionCtx *pCtx) {
|
|||
pInfo->max = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// if (pInfo->min > pCtx->param[1].dKey) {
|
||||
// pInfo->min = pCtx->param[1].dKey;
|
||||
// }
|
||||
//
|
||||
// if (pInfo->max < pCtx->param[2].dKey) {
|
||||
// pInfo->max = pCtx->param[2].dKey;
|
||||
// }
|
||||
|
||||
goto _spread_over;
|
||||
}
|
||||
|
||||
void *pData = GET_INPUT_CHAR(pCtx);
|
||||
|
@ -3873,7 +3881,11 @@ static void interp_function(SQLFunctionCtx *pCtx) {
|
|||
*(TSKEY *)pCtx->aOutputBuf = pInfoDetail->ts;
|
||||
} else {
|
||||
if (pInfoDetail->type == TSDB_FILL_NULL) {
|
||||
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
} else if (pInfoDetail->type == TSDB_FILL_SET_VALUE) {
|
||||
tVariantDump(&pCtx->param[1], pCtx->aOutputBuf, pCtx->inputType);
|
||||
} else if (pInfoDetail->type == TSDB_FILL_PREV) {
|
||||
|
@ -3924,11 +3936,15 @@ static void interp_function(SQLFunctionCtx *pCtx) {
|
|||
taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point);
|
||||
}
|
||||
|
||||
} else {
|
||||
if (srcType == TSDB_DATA_TYPE_BINARY || srcType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->aOutputBuf, pCtx->inputBytes);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(interpInfo.pInterpDetail);
|
||||
}
|
||||
|
|
|
@ -384,14 +384,11 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
|
|||
|
||||
// keep the code in local variable in order to avoid invalid read in case of async query
|
||||
int32_t code = pSql->res.code;
|
||||
|
||||
if (pSql->fp != NULL) { // callback function
|
||||
if (code == 0) {
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
(*pSql->fp)(pSql->param, pSql, 0);
|
||||
} else {
|
||||
tscQueueAsyncRes(pSql);
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -42,36 +42,43 @@ enum {
|
|||
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
|
||||
|
||||
static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
|
||||
// int32_t numType = isValidNumber(pToken);
|
||||
// if (TK_ILLEGAL == numType) {
|
||||
// return numType;
|
||||
// }
|
||||
if (pToken->n == 0) {
|
||||
return TK_ILLEGAL;
|
||||
}
|
||||
|
||||
int32_t radix = 10;
|
||||
if (pToken->type == TK_HEX) {
|
||||
radix = 16;
|
||||
} else if (pToken->type == TK_OCT) {
|
||||
radix = 8;
|
||||
} else if (pToken->type == TK_BIN) {
|
||||
radix = 2;
|
||||
|
||||
int32_t radixList[3] = {16, 8, 2};
|
||||
if (pToken->type == TK_HEX || pToken->type == TK_OCT || pToken->type == TK_BIN) {
|
||||
radix = radixList[pToken->type - TK_HEX];
|
||||
}
|
||||
|
||||
errno = 0;
|
||||
*value = strtoll(pToken->z, endPtr, radix);
|
||||
|
||||
// not a valid integer number, return error
|
||||
if ((pToken->type == TK_STRING || pToken->type == TK_ID) && ((*endPtr - pToken->z) != pToken->n)) {
|
||||
return TK_ILLEGAL;
|
||||
}
|
||||
|
||||
return pToken->type;
|
||||
}
|
||||
|
||||
static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
|
||||
// int32_t numType = isValidNumber(pToken);
|
||||
// if (TK_ILLEGAL == numType) {
|
||||
// return numType;
|
||||
// }
|
||||
if (pToken->n == 0) {
|
||||
return TK_ILLEGAL;
|
||||
}
|
||||
|
||||
errno = 0;
|
||||
*value = strtod(pToken->z, endPtr);
|
||||
|
||||
// not a valid integer number, return error
|
||||
if ((pToken->type == TK_STRING || pToken->type == TK_ID) && ((*endPtr - pToken->z) != pToken->n)) {
|
||||
return TK_ILLEGAL;
|
||||
} else {
|
||||
return pToken->type;
|
||||
}
|
||||
}
|
||||
|
||||
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
|
||||
int32_t index = 0;
|
||||
|
@ -305,8 +312,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
|
|||
case TSDB_DATA_TYPE_BINARY:
|
||||
// binary data cannot be null-terminated char string, otherwise the last char of the string is lost
|
||||
if (pToken->type == TK_NULL) {
|
||||
varDataSetLen(payload, sizeof(int8_t));
|
||||
*(uint8_t*) varDataVal(payload) = TSDB_DATA_BINARY_NULL;
|
||||
setVardataNull(payload, TSDB_DATA_TYPE_BINARY);
|
||||
} else { // too long values will return invalid sql, not be truncated automatically
|
||||
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { //todo refactor
|
||||
return tscInvalidSQLErrMsg(msg, "string data overflow", pToken->z);
|
||||
|
@ -319,8 +325,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
|
|||
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
if (pToken->type == TK_NULL) {
|
||||
varDataSetLen(payload, sizeof(int32_t));
|
||||
*(uint32_t*) varDataVal(payload) = TSDB_DATA_NCHAR_NULL;
|
||||
setVardataNull(payload, TSDB_DATA_TYPE_NCHAR);
|
||||
} else {
|
||||
// if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'
|
||||
size_t output = 0;
|
||||
|
@ -422,9 +427,9 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (((sToken.type != TK_NOW) && (sToken.type != TK_INTEGER) && (sToken.type != TK_STRING) &&
|
||||
(sToken.type != TK_FLOAT) && (sToken.type != TK_BOOL) && (sToken.type != TK_NULL)) ||
|
||||
(sToken.n == 0) || (sToken.type == TK_RP)) {
|
||||
int16_t type = sToken.type;
|
||||
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
|
||||
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
|
||||
tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z);
|
||||
*code = TSDB_CODE_INVALID_SQL;
|
||||
return -1;
|
||||
|
@ -1306,8 +1311,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
SQueryInfo *pQueryInfo = NULL;
|
||||
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
|
||||
|
||||
uint16_t type = (sToken.type == TK_INSERT)? TSDB_QUERY_TYPE_INSERT:TSDB_QUERY_TYPE_IMPORT;
|
||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, type);
|
||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
|
||||
|
||||
sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
|
||||
if (sToken.type != TK_INTO) {
|
||||
|
|
|
@ -3290,13 +3290,14 @@ static int32_t setExprToCond(tSQLExpr** parent, tSQLExpr* pExpr, const char* msg
|
|||
|
||||
static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
|
||||
int32_t parentOptr) {
|
||||
const char* msg1 = "meter query cannot use tags filter";
|
||||
const char* msg1 = "table query cannot use tags filter";
|
||||
const char* msg2 = "illegal column name";
|
||||
const char* msg3 = "only one query time range allowed";
|
||||
const char* msg4 = "only one join condition allowed";
|
||||
const char* msg5 = "not support ordinary column join";
|
||||
const char* msg6 = "only one query condition on tbname allowed";
|
||||
const char* msg7 = "only in/like allowed in filter table name";
|
||||
const char* msg8 = "wildcard string should be less than 20 characters";
|
||||
|
||||
tSQLExpr* pLeft = (*pExpr)->pLeft;
|
||||
tSQLExpr* pRight = (*pExpr)->pRight;
|
||||
|
@ -3344,7 +3345,7 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
|
|||
// check for like expression
|
||||
if ((*pExpr)->nSQLOptr == TK_LIKE) {
|
||||
if (pRight->val.nLen > TSDB_PATTERN_STRING_MAX_LEN) {
|
||||
return TSDB_CODE_INVALID_SQL;
|
||||
return invalidSqlErrMsg(pQueryInfo->msg, msg8);
|
||||
}
|
||||
|
||||
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
|
||||
|
@ -3361,6 +3362,10 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
|
|||
return invalidSqlErrMsg(pQueryInfo->msg, msg7);
|
||||
}
|
||||
|
||||
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
|
||||
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
|
||||
}
|
||||
|
||||
if (pCondExpr->pTableCond == NULL) {
|
||||
pCondExpr->pTableCond = *pExpr;
|
||||
pCondExpr->relType = parentOptr;
|
||||
|
@ -3808,9 +3813,7 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
|
|||
const char* msg2 = "invalid filter expression";
|
||||
|
||||
int32_t ret = TSDB_CODE_SUCCESS;
|
||||
|
||||
pQueryInfo->window.skey = 0;
|
||||
pQueryInfo->window.ekey = INT64_MAX;
|
||||
pQueryInfo->window = TSWINDOW_INITIALIZER;
|
||||
|
||||
// tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space
|
||||
SStringBuilder sb; memset(&sb, 0, sizeof(sb));
|
||||
|
@ -4012,9 +4015,9 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
|
|||
|
||||
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
|
||||
if (pQueryInfo->defaultVal == NULL) {
|
||||
pQueryInfo->defaultVal = calloc(size, sizeof(int64_t));
|
||||
if (pQueryInfo->defaultVal == NULL) {
|
||||
if (pQueryInfo->fillVal == NULL) {
|
||||
pQueryInfo->fillVal = calloc(size, sizeof(int64_t));
|
||||
if (pQueryInfo->fillVal == NULL) {
|
||||
return TSDB_CODE_CLI_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
@ -4025,7 +4028,11 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
|
|||
pQueryInfo->fillType = TSDB_FILL_NULL;
|
||||
for (int32_t i = START_INTERPO_COL_IDX; i < size; ++i) {
|
||||
TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
setNull((char*)&pQueryInfo->defaultVal[i], pFields->type, pFields->bytes);
|
||||
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
|
||||
} else {
|
||||
setNull((char*)&pQueryInfo->fillVal[i], pFields->type, pFields->bytes);
|
||||
};
|
||||
}
|
||||
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_PREV;
|
||||
|
@ -4058,11 +4065,11 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
|
|||
TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setNull((char*)(&pQueryInfo->defaultVal[i]), pFields->type, pFields->bytes);
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->defaultVal[i], pFields->type);
|
||||
int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(pQueryInfo->msg, msg);
|
||||
}
|
||||
|
@ -4076,9 +4083,9 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
|
|||
TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setNull((char*)(&pQueryInfo->defaultVal[i]), pFields->type, pFields->bytes);
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
|
||||
} else {
|
||||
tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->defaultVal[i], pFields->type);
|
||||
tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pFields->type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -5334,13 +5341,6 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
|
|||
}
|
||||
}
|
||||
|
||||
SColumnIndex ind = {0};
|
||||
SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
|
||||
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false);
|
||||
|
||||
const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
|
||||
strncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
|
||||
|
||||
switch (index) {
|
||||
case 0:
|
||||
pQueryInfo->command = TSDB_SQL_CURRENT_DB;
|
||||
|
@ -5359,6 +5359,13 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
default: { return invalidSqlErrMsg(pQueryInfo->msg, msg3); }
|
||||
}
|
||||
|
||||
SColumnIndex ind = {0};
|
||||
SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
|
||||
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false);
|
||||
|
||||
const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
|
||||
strncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
|
||||
}
|
||||
|
||||
// can only perform the parameters based on the macro definitation
|
||||
|
@ -5606,6 +5613,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
|
|||
const char* msg3 = "fill only available for interval query";
|
||||
const char* msg4 = "fill option not supported in stream computing";
|
||||
const char* msg5 = "sql too long"; // todo ADD support
|
||||
const char* msg6 = "from missing in subclause";
|
||||
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
|
@ -5623,8 +5631,11 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from;
|
||||
tVariant* pVar = &pSrcMeterName->a[0].pVar;
|
||||
if (pSrcMeterName == NULL || pSrcMeterName->nExpr == 0) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
|
||||
tVariant* pVar = &pSrcMeterName->a[0].pVar;
|
||||
SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING};
|
||||
if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
|
||||
|
|
|
@ -145,7 +145,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
|
|||
pFillCol[i].flag = pExpr->colInfo.flag;
|
||||
pFillCol[i].col.offset = offset;
|
||||
pFillCol[i].functionId = pExpr->functionId;
|
||||
pFillCol[i].defaultVal.i = pQueryInfo->defaultVal[i];
|
||||
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
|
||||
offset += pExpr->resBytes;
|
||||
}
|
||||
|
||||
|
@ -946,8 +946,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
|
|||
}
|
||||
|
||||
while (1) {
|
||||
int64_t newRows = -1;
|
||||
taosGenerateDataBlock(pFillInfo, pResPages, &newRows, pLocalReducer->resColModel->capacity);
|
||||
int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
|
||||
|
||||
if (pQueryInfo->limit.offset < newRows) {
|
||||
newRows -= pQueryInfo->limit.offset;
|
||||
|
|
|
@ -781,8 +781,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
*((int64_t *)pMsg) = htobe64(pQueryInfo->defaultVal[i]);
|
||||
pMsg += sizeof(pQueryInfo->defaultVal[0]);
|
||||
*((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]);
|
||||
pMsg += sizeof(pQueryInfo->fillVal[0]);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -223,7 +223,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->defaultVal[i]), pField->bytes, pField->type);
|
||||
assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type);
|
||||
row[i] = pSql->res.data + offset;
|
||||
}
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt) {
|
|||
SSub* pSub = (SSub*)sub;
|
||||
|
||||
SSubscriptionProgress target = {.uid = uid, .key = 0};
|
||||
SSubscriptionProgress* p = taosArraySearch(pSub->progress, tscCompareSubscriptionProgress, &target);
|
||||
SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress);
|
||||
if (p == NULL) {
|
||||
return dflt;
|
||||
}
|
||||
|
@ -74,7 +74,7 @@ void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) {
|
|||
SSub* pSub = (SSub*)sub;
|
||||
|
||||
SSubscriptionProgress target = {.uid = uid, .key = ts};
|
||||
SSubscriptionProgress* p = taosArraySearch(pSub->progress, tscCompareSubscriptionProgress, &target);
|
||||
SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress);
|
||||
if (p != NULL) {
|
||||
p->key = ts;
|
||||
}
|
||||
|
@ -211,7 +211,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
|
|||
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
|
||||
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
SSubscriptionProgress target = {.uid = pTableMeta->uid, .key = 0};
|
||||
SSubscriptionProgress* p = taosArraySearch(pSub->progress, tscCompareSubscriptionProgress, &target);
|
||||
SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress);
|
||||
if (p == NULL) {
|
||||
taosArrayClear(pSub->progress);
|
||||
taosArrayPush(pSub->progress, &target);
|
||||
|
|
|
@ -281,7 +281,7 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo) {
|
|||
}
|
||||
|
||||
pQueryInfo->fillType = TSDB_FILL_NONE;
|
||||
tfree(pQueryInfo->defaultVal);
|
||||
tfree(pQueryInfo->fillVal);
|
||||
}
|
||||
|
||||
int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
|
||||
|
@ -1616,7 +1616,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
|
|||
|
||||
pQueryInfo->tsBuf = tsBufDestory(pQueryInfo->tsBuf);
|
||||
|
||||
tfree(pQueryInfo->defaultVal);
|
||||
tfree(pQueryInfo->fillVal);
|
||||
}
|
||||
|
||||
void tscClearSubqueryInfo(SSqlCmd* pCmd) {
|
||||
|
@ -1768,7 +1768,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
pNewQueryInfo->order = pQueryInfo->order;
|
||||
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
|
||||
pNewQueryInfo->pTableMetaInfo = NULL;
|
||||
pNewQueryInfo->defaultVal = NULL;
|
||||
pNewQueryInfo->fillVal = NULL;
|
||||
pNewQueryInfo->numOfTables = 0;
|
||||
pNewQueryInfo->tsBuf = NULL;
|
||||
|
||||
|
@ -1780,8 +1780,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond);
|
||||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
pNewQueryInfo->defaultVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
memcpy(pNewQueryInfo->defaultVal, pQueryInfo->defaultVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
}
|
||||
|
||||
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
|
||||
|
|
|
@ -154,8 +154,8 @@ typedef struct SDataCol {
|
|||
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
|
||||
|
||||
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
|
||||
void dataColAppendVal(SDataCol *pCol, void *value, int numOfPoints, int maxPoints);
|
||||
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfPoints);
|
||||
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints);
|
||||
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows);
|
||||
void dataColSetOffset(SDataCol *pCol, int nEle);
|
||||
|
||||
bool isNEleNull(SDataCol *pCol, int nEle);
|
||||
|
@ -195,7 +195,7 @@ typedef struct {
|
|||
int maxPoints; // max number of points
|
||||
int bufSize;
|
||||
|
||||
int numOfPoints;
|
||||
int numOfRows;
|
||||
int numOfCols; // Total number of cols
|
||||
int sversion; // TODO: set sversion
|
||||
void * buf;
|
||||
|
@ -205,7 +205,7 @@ typedef struct {
|
|||
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
|
||||
#define dataColsKeyAt(pCols, idx) ((TSKEY *)(keyCol(pCols)->pData))[(idx)]
|
||||
#define dataColsKeyFirst(pCols) dataColsKeyAt(pCols, 0)
|
||||
#define dataColsKeyLast(pCols) ((pCols->numOfPoints == 0) ? 0 : dataColsKeyAt(pCols, (pCols)->numOfPoints - 1))
|
||||
#define dataColsKeyLast(pCols) ((pCols->numOfRows == 0) ? 0 : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
|
||||
|
||||
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
|
||||
void tdResetDataCols(SDataCols *pCols);
|
||||
|
|
|
@ -187,29 +187,29 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
|
|||
|
||||
}
|
||||
|
||||
void dataColAppendVal(SDataCol *pCol, void *value, int numOfPoints, int maxPoints) {
|
||||
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) {
|
||||
ASSERT(pCol != NULL && value != NULL);
|
||||
|
||||
switch (pCol->type) {
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
// set offset
|
||||
pCol->dataOff[numOfPoints] = pCol->len;
|
||||
pCol->dataOff[numOfRows] = pCol->len;
|
||||
// Copy data
|
||||
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
|
||||
// Update the length
|
||||
pCol->len += varDataTLen(value);
|
||||
break;
|
||||
default:
|
||||
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfPoints);
|
||||
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
|
||||
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
|
||||
pCol->len += pCol->bytes;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfPoints) {
|
||||
int pointsLeft = numOfPoints - pointsToPop;
|
||||
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows) {
|
||||
int pointsLeft = numOfRows - pointsToPop;
|
||||
|
||||
ASSERT(pointsLeft > 0);
|
||||
|
||||
|
@ -221,7 +221,7 @@ void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfPoints) {
|
|||
memmove(pCol->pData, POINTER_SHIFT(pCol->pData, toffset), pCol->len);
|
||||
dataColSetOffset(pCol, pointsLeft);
|
||||
} else {
|
||||
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfPoints);
|
||||
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
|
||||
pCol->len = TYPE_BYTES[pCol->type] * pointsLeft;
|
||||
memmove(pCol->pData, POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * pointsToPop), pCol->len);
|
||||
}
|
||||
|
@ -322,7 +322,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
|
|||
|
||||
pRet->numOfCols = pDataCols->numOfCols;
|
||||
pRet->sversion = pDataCols->sversion;
|
||||
if (keepData) pRet->numOfPoints = pDataCols->numOfPoints;
|
||||
if (keepData) pRet->numOfRows = pDataCols->numOfRows;
|
||||
|
||||
for (int i = 0; i < pDataCols->numOfCols; i++) {
|
||||
pRet->cols[i].type = pDataCols->cols[i].type;
|
||||
|
@ -352,7 +352,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
|
|||
}
|
||||
|
||||
void tdResetDataCols(SDataCols *pCols) {
|
||||
pCols->numOfPoints = 0;
|
||||
pCols->numOfRows = 0;
|
||||
for (int i = 0; i < pCols->maxCols; i++) {
|
||||
dataColReset(pCols->cols + i);
|
||||
}
|
||||
|
@ -365,14 +365,14 @@ void tdAppendDataRowToDataCol(SDataRow row, SDataCols *pCols) {
|
|||
SDataCol *pCol = pCols->cols + i;
|
||||
void * value = tdGetRowDataOfCol(row, pCol->type, pCol->offset);
|
||||
|
||||
dataColAppendVal(pCol, value, pCols->numOfPoints, pCols->maxPoints);
|
||||
dataColAppendVal(pCol, value, pCols->numOfRows, pCols->maxPoints);
|
||||
}
|
||||
pCols->numOfPoints++;
|
||||
pCols->numOfRows++;
|
||||
}
|
||||
|
||||
// Pop pointsToPop points from the SDataCols
|
||||
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop) {
|
||||
int pointsLeft = pCols->numOfPoints - pointsToPop;
|
||||
int pointsLeft = pCols->numOfRows - pointsToPop;
|
||||
if (pointsLeft <= 0) {
|
||||
tdResetDataCols(pCols);
|
||||
return;
|
||||
|
@ -380,14 +380,14 @@ void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop) {
|
|||
|
||||
for (int iCol = 0; iCol < pCols->numOfCols; iCol++) {
|
||||
SDataCol *pCol = pCols->cols + iCol;
|
||||
dataColPopPoints(pCol, pointsToPop, pCols->numOfPoints);
|
||||
dataColPopPoints(pCol, pointsToPop, pCols->numOfRows);
|
||||
}
|
||||
pCols->numOfPoints = pointsLeft;
|
||||
pCols->numOfRows = pointsLeft;
|
||||
}
|
||||
|
||||
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
|
||||
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfPoints);
|
||||
ASSERT(target->numOfPoints + rowsToMerge <= target->maxPoints);
|
||||
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
|
||||
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
|
||||
ASSERT(target->numOfCols == source->numOfCols);
|
||||
|
||||
SDataCols *pTarget = NULL;
|
||||
|
@ -395,10 +395,10 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
|
|||
if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap
|
||||
for (int i = 0; i < rowsToMerge; i++) {
|
||||
for (int j = 0; j < source->numOfCols; j++) {
|
||||
dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfPoints,
|
||||
dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfRows,
|
||||
target->maxPoints);
|
||||
}
|
||||
target->numOfPoints++;
|
||||
target->numOfRows++;
|
||||
}
|
||||
} else {
|
||||
pTarget = tdDupDataCols(target, true);
|
||||
|
@ -406,7 +406,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
|
|||
|
||||
int iter1 = 0;
|
||||
int iter2 = 0;
|
||||
tdMergeTwoDataCols(target, pTarget, &iter1, source, &iter2, pTarget->numOfPoints + rowsToMerge);
|
||||
tdMergeTwoDataCols(target, pTarget, &iter1, source, &iter2, pTarget->numOfRows + rowsToMerge);
|
||||
}
|
||||
|
||||
tdFreeDataCols(pTarget);
|
||||
|
@ -421,30 +421,30 @@ void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCol
|
|||
// TODO: add resolve duplicate key here
|
||||
tdResetDataCols(target);
|
||||
|
||||
while (target->numOfPoints < tRows) {
|
||||
if (*iter1 >= src1->numOfPoints && *iter2 >= src2->numOfPoints) break;
|
||||
while (target->numOfRows < tRows) {
|
||||
if (*iter1 >= src1->numOfRows && *iter2 >= src2->numOfRows) break;
|
||||
|
||||
TSKEY key1 = (*iter1 >= src1->numOfPoints) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
|
||||
TSKEY key2 = (*iter2 >= src2->numOfPoints) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
|
||||
TSKEY key1 = (*iter1 >= src1->numOfRows) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
|
||||
TSKEY key2 = (*iter2 >= src2->numOfRows) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
|
||||
|
||||
if (key1 <= key2) {
|
||||
for (int i = 0; i < src1->numOfCols; i++) {
|
||||
ASSERT(target->cols[i].type == src1->cols[i].type);
|
||||
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfPoints,
|
||||
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
|
||||
target->maxPoints);
|
||||
}
|
||||
|
||||
target->numOfPoints++;
|
||||
target->numOfRows++;
|
||||
(*iter1)++;
|
||||
if (key1 == key2) (*iter2)++;
|
||||
} else {
|
||||
for (int i = 0; i < src2->numOfCols; i++) {
|
||||
ASSERT(target->cols[i].type == src2->cols[i].type);
|
||||
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfPoints,
|
||||
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
|
||||
target->maxPoints);
|
||||
}
|
||||
|
||||
target->numOfPoints++;
|
||||
target->numOfRows++;
|
||||
(*iter2)++;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -142,7 +142,7 @@ int32_t rpcDebugFlag = 135;
|
|||
int32_t uDebugFlag = 131;
|
||||
int32_t debugFlag = 131;
|
||||
int32_t sDebugFlag = 135;
|
||||
int32_t tsdbDebugFlag = 131;
|
||||
int32_t tsdbDebugFlag = 135;
|
||||
|
||||
// the maximum number of results for projection query on super table that are returned from
|
||||
// one virtual node, to order according to timestamp
|
||||
|
@ -202,6 +202,8 @@ char tsTimezone[64] = {0};
|
|||
char tsLocale[TSDB_LOCALE_LEN] = {0};
|
||||
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
|
||||
|
||||
int32_t tsMaxBinaryDisplayWidth = 30;
|
||||
|
||||
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
|
||||
|
||||
void taosSetAllDebugFlag() {
|
||||
|
@ -1227,6 +1229,16 @@ static void doInitGlobalConfig() {
|
|||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "maxBinaryDisplayWidth";
|
||||
cfg.ptr = &tsMaxBinaryDisplayWidth;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
|
||||
cfg.minValue = 1;
|
||||
cfg.maxValue = 0x7fffffff;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
}
|
||||
|
||||
void taosInitGlobalCfg() {
|
||||
|
|
|
@ -42,9 +42,6 @@ static void getStatics_i8(const TSKEY *primaryKey, const void *pData, int32_t nu
|
|||
|
||||
ASSERT(numOfRow <= INT16_MAX);
|
||||
|
||||
// int64_t lastKey = 0;
|
||||
// int8_t lastVal = TSDB_DATA_TINYINT_NULL;
|
||||
|
||||
for (int32_t i = 0; i < numOfRow; ++i) {
|
||||
if (isNull((char *)&data[i], TSDB_DATA_TYPE_TINYINT)) {
|
||||
(*numOfNull) += 1;
|
||||
|
@ -213,15 +210,6 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num
|
|||
fmax = fv;
|
||||
*maxIndex = i;
|
||||
}
|
||||
|
||||
// if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) {
|
||||
// lastKey = primaryKey[i];
|
||||
// lastVal = data[i];
|
||||
// } else {
|
||||
// *wsum = lastVal * (primaryKey[i] - lastKey);
|
||||
// lastKey = primaryKey[i];
|
||||
// lastVal = data[i];
|
||||
// }
|
||||
}
|
||||
|
||||
double csum = 0;
|
||||
|
@ -232,9 +220,9 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num
|
|||
SET_DOUBLE_VAL_ALIGN(max, &fmax);
|
||||
SET_DOUBLE_VAL_ALIGN(min, &fmin);
|
||||
#else
|
||||
*sum = csum;
|
||||
*max = fmax;
|
||||
*min = fmin;
|
||||
*(double*)sum = csum;
|
||||
*(double*)max = fmax;
|
||||
*(double*)min = fmin;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -267,15 +255,6 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num
|
|||
dmax = dv;
|
||||
*maxIndex = i;
|
||||
}
|
||||
|
||||
// if (isNull(&lastVal, TSDB_DATA_TYPE_DOUBLE)) {
|
||||
// lastKey = primaryKey[i];
|
||||
// lastVal = data[i];
|
||||
// } else {
|
||||
// *wsum = lastVal * (primaryKey[i] - lastKey);
|
||||
// lastKey = primaryKey[i];
|
||||
// lastVal = data[i];
|
||||
// }
|
||||
}
|
||||
|
||||
double csum = 0;
|
||||
|
@ -288,24 +267,64 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num
|
|||
SET_DOUBLE_VAL_ALIGN(max, &dmax);
|
||||
SET_DOUBLE_VAL_ALIGN(min, &dmin);
|
||||
#else
|
||||
*sum = csum;
|
||||
*max = dmax;
|
||||
*min = dmin;
|
||||
*(double*) sum = csum;
|
||||
*(double*) max = dmax;
|
||||
*(double*) min = dmin;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void getStatics_bin(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
|
||||
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
|
||||
const char* data = pData;
|
||||
ASSERT(numOfRow <= INT16_MAX);
|
||||
|
||||
for (int32_t i = 0; i < numOfRow; ++i) {
|
||||
if (isNull((const char*) varDataVal(data), TSDB_DATA_TYPE_BINARY)) {
|
||||
(*numOfNull) += 1;
|
||||
}
|
||||
|
||||
data += varDataLen(data);
|
||||
}
|
||||
|
||||
*sum = 0;
|
||||
*max = 0;
|
||||
*min = 0;
|
||||
*minIndex = 0;
|
||||
*maxIndex = 0;
|
||||
}
|
||||
|
||||
static void getStatics_nchr(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
|
||||
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
|
||||
const char* data = pData;
|
||||
ASSERT(numOfRow <= INT16_MAX);
|
||||
|
||||
for (int32_t i = 0; i < numOfRow; ++i) {
|
||||
if (isNull((const char*) varDataVal(data), TSDB_DATA_TYPE_NCHAR)) {
|
||||
(*numOfNull) += 1;
|
||||
}
|
||||
|
||||
data += varDataLen(data);
|
||||
}
|
||||
|
||||
*sum = 0;
|
||||
*max = 0;
|
||||
*min = 0;
|
||||
*minIndex = 0;
|
||||
*maxIndex = 0;
|
||||
}
|
||||
|
||||
tDataTypeDescriptor tDataTypeDesc[11] = {
|
||||
{TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL},
|
||||
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, NULL},
|
||||
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_i8},
|
||||
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
|
||||
{TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
|
||||
{TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt, getStatics_i32},
|
||||
{TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", tsCompressBigint, tsDecompressBigint, getStatics_i64},
|
||||
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat, getStatics_f},
|
||||
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble, getStatics_d},
|
||||
{TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, NULL},
|
||||
{TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, getStatics_bin},
|
||||
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
|
||||
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, NULL},
|
||||
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, getStatics_nchr},
|
||||
};
|
||||
|
||||
char tTokenTypeSwitcher[13] = {
|
||||
|
@ -362,6 +381,18 @@ bool isNull(const char *val, int32_t type) {
|
|||
};
|
||||
}
|
||||
|
||||
void setVardataNull(char* val, int32_t type) {
|
||||
if (type == TSDB_DATA_TYPE_BINARY) {
|
||||
varDataSetLen(val, sizeof(int8_t));
|
||||
*(uint8_t*) varDataVal(val) = TSDB_DATA_BINARY_NULL;
|
||||
} else if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
varDataSetLen(val, sizeof(int32_t));
|
||||
*(uint32_t*) varDataVal(val) = TSDB_DATA_NCHAR_NULL;
|
||||
} else {
|
||||
assert(0);
|
||||
}
|
||||
}
|
||||
|
||||
void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
|
||||
|
||||
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
|
||||
|
@ -464,7 +495,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
|
|||
break;
|
||||
};
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
wcsncpy((wchar_t*)val, (wchar_t*)src, len / TSDB_NCHAR_SIZE);
|
||||
varDataCopy(val, src);
|
||||
break;
|
||||
};
|
||||
default: {
|
||||
|
|
|
@ -188,7 +188,7 @@ public class TSDBDriver implements java.sql.Driver {
|
|||
}
|
||||
|
||||
public boolean acceptsURL(String url) throws SQLException {
|
||||
return true;
|
||||
return StringUtils.isNotBlank(url) && url.startsWith(URL_PREFIX);
|
||||
}
|
||||
|
||||
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
|
||||
|
|
|
@ -157,6 +157,7 @@ extern tDataTypeDescriptor tDataTypeDesc[11];
|
|||
bool isValidDataType(int32_t type, int32_t length);
|
||||
bool isNull(const char *val, int32_t type);
|
||||
|
||||
void setVardataNull(char* val, int32_t type);
|
||||
void setNull(char *val, int32_t type, int32_t bytes);
|
||||
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
|
||||
|
||||
|
@ -326,8 +327,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
|
|||
|
||||
#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u
|
||||
#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type
|
||||
#define TSDB_QUERY_TYPE_IMPORT 0x200u // import data
|
||||
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x800u
|
||||
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u
|
||||
|
||||
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
|
||||
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
|
||||
|
|
|
@ -443,7 +443,7 @@ typedef struct {
|
|||
int16_t numOfOutput; // final output columns numbers
|
||||
int16_t tagNameRelType; // relation of tag criteria and tbname criteria
|
||||
int16_t fillType; // interpolate type
|
||||
uint64_t defaultVal; // default value array list
|
||||
uint64_t fillVal; // default value array list
|
||||
int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
|
||||
int32_t tsLen; // total length of ts comp block
|
||||
int32_t tsNumOfBlocks; // ts comp block numbers
|
||||
|
|
|
@ -29,18 +29,6 @@
|
|||
#define MAX_COMMAND_SIZE 65536
|
||||
#define HISTORY_FILE ".taos_history"
|
||||
|
||||
#define BOOL_OUTPUT_LENGTH 6
|
||||
#define TINYINT_OUTPUT_LENGTH 6
|
||||
#define SMALLINT_OUTPUT_LENGTH 7
|
||||
#define INT_OUTPUT_LENGTH 11
|
||||
#define BIGINT_OUTPUT_LENGTH 21
|
||||
#define FLOAT_OUTPUT_LENGTH 20
|
||||
#define DOUBLE_OUTPUT_LENGTH 25
|
||||
#define BINARY_OUTPUT_LENGTH 20
|
||||
|
||||
// dynamic config timestamp width according to maximum time precision
|
||||
extern int32_t TIMESTAMP_OUTPUT_LENGTH;
|
||||
|
||||
typedef struct SShellHistory {
|
||||
char* hist[MAX_HISTORY_SIZE];
|
||||
int hstart;
|
||||
|
@ -80,7 +68,7 @@ void get_history_path(char* history);
|
|||
void cleanup_handler(void* arg);
|
||||
void exitShell();
|
||||
int shellDumpResult(TAOS* con, char* fname, int* error_no, bool printMode);
|
||||
void shellPrintNChar(char* str, int width, bool printMode);
|
||||
void shellPrintNChar(const char* str, int length, int width);
|
||||
void shellGetGrantInfo(void *con);
|
||||
int isCommentLine(char *line);
|
||||
|
||||
|
|
|
@ -352,36 +352,30 @@ void *shellLoopQuery(void *arg) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void shellPrintNChar(char *str, int width, bool printMode) {
|
||||
int col_left = width;
|
||||
void shellPrintNChar(const char *str, int length, int width) {
|
||||
int pos = 0, cols = 0;
|
||||
while (pos < length) {
|
||||
wchar_t wc;
|
||||
while (col_left > 0) {
|
||||
if (*str == '\0') break;
|
||||
char *tstr = str;
|
||||
int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX);
|
||||
if (byte_width <= 0) break;
|
||||
int col_width = wcwidth(wc);
|
||||
if (col_width <= 0) {
|
||||
str += byte_width;
|
||||
continue;
|
||||
pos += mbtowc(&wc, str + pos, MB_CUR_MAX);
|
||||
if (pos > length) {
|
||||
break;
|
||||
}
|
||||
|
||||
int w = wcwidth(wc);
|
||||
if (w > 0) {
|
||||
if (width > 0 && cols + w > width) {
|
||||
break;
|
||||
}
|
||||
if (col_left < col_width) break;
|
||||
printf("%lc", wc);
|
||||
str += byte_width;
|
||||
col_left -= col_width;
|
||||
cols += w;
|
||||
}
|
||||
}
|
||||
|
||||
while (col_left > 0) {
|
||||
printf(" ");
|
||||
col_left--;
|
||||
for (; cols < width; cols++) {
|
||||
putchar(' ');
|
||||
}
|
||||
}
|
||||
|
||||
if (!printMode) {
|
||||
printf("|");
|
||||
} else {
|
||||
printf("\n");
|
||||
}
|
||||
}
|
||||
|
||||
int get_old_terminal_mode(struct termios *tio) {
|
||||
/* Make sure stdin is a terminal. */
|
||||
|
|
|
@ -35,6 +35,9 @@ int prompt_size = 6;
|
|||
TAOS_RES *result = NULL;
|
||||
SShellHistory history;
|
||||
|
||||
#define DEFAULT_MAX_BINARY_DISPLAY_WIDTH 30
|
||||
extern int32_t tsMaxBinaryDisplayWidth;
|
||||
|
||||
/*
|
||||
* FUNCTION: Initialize the shell.
|
||||
*/
|
||||
|
@ -195,7 +198,15 @@ int32_t shellRunCommand(TAOS *con, char *command) {
|
|||
} else if (regex_match(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
|
||||
// If clear the screen.
|
||||
system("clear");
|
||||
return 0;
|
||||
} else if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
|
||||
strtok(command, " \t");
|
||||
strtok(NULL, " \t");
|
||||
char* p = strtok(NULL, " \t");
|
||||
if (strcasecmp(p, "default") == 0) {
|
||||
tsMaxBinaryDisplayWidth = DEFAULT_MAX_BINARY_DISPLAY_WIDTH;
|
||||
} else {
|
||||
tsMaxBinaryDisplayWidth = atoi(p);
|
||||
}
|
||||
} else if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
|
||||
/* If source file. */
|
||||
char *c_ptr = strtok(command, " ;");
|
||||
|
@ -310,32 +321,91 @@ int regex_match(const char *s, const char *reg, int cflags) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) {
|
||||
TAOS_ROW row = NULL;
|
||||
int numOfRows = 0;
|
||||
time_t tt;
|
||||
char buf[25] = "\0";
|
||||
struct tm *ptm;
|
||||
int output_bytes = 0;
|
||||
FILE * fp = NULL;
|
||||
int num_fields = taos_field_count(con);
|
||||
wordexp_t full_path;
|
||||
|
||||
assert(num_fields != 0);
|
||||
|
||||
result = taos_use_result(con);
|
||||
if (result == NULL) {
|
||||
taos_error(con);
|
||||
return -1;
|
||||
static char* formatTimestamp(char* buf, int64_t val, int precision) {
|
||||
if (args.is_raw_time) {
|
||||
sprintf(buf, "%" PRId64, val);
|
||||
return buf;
|
||||
}
|
||||
|
||||
if (fname != NULL) {
|
||||
time_t tt;
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
tt = (time_t)(val / 1000000);
|
||||
} else {
|
||||
tt = (time_t)(val / 1000);
|
||||
}
|
||||
|
||||
struct tm* ptm = localtime(&tt);
|
||||
size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
|
||||
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
sprintf(buf + pos, ".%06d", (int)(val % 1000000));
|
||||
} else {
|
||||
sprintf(buf + pos, ".%03d", (int)(val % 1000));
|
||||
}
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
|
||||
static void dumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) {
|
||||
if (val == NULL) {
|
||||
fprintf(fp, "%s", TSDB_DATA_NULL_STR);
|
||||
return;
|
||||
}
|
||||
|
||||
char buf[TSDB_MAX_BYTES_PER_ROW];
|
||||
switch (field->type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
fprintf(fp, "%d", ((((int)(*((char *)val))) == 1) ? 1 : 0));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
fprintf(fp, "%d", (int)(*((char *)val)));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
fprintf(fp, "%d", (int)(*((short *)val)));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
fprintf(fp, "%d", *((int *)val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
fprintf(fp, "%" PRId64, *((int64_t *)val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
memcpy(buf, val, length);
|
||||
buf[length] = 0;
|
||||
fprintf(fp, "\'%s\'", buf);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
formatTimestamp(buf, *(int64_t*)val, precision);
|
||||
fprintf(fp, "'%s'", buf);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int dumpResultToFile(const char* fname, TAOS_RES* result) {
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
if (row == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
wordexp_t full_path;
|
||||
|
||||
if (wordexp(fname, &full_path, 0) != 0) {
|
||||
fprintf(stderr, "ERROR: invalid file name: %s\n", fname);
|
||||
return -1;
|
||||
}
|
||||
|
||||
fp = fopen(full_path.we_wordv[0], "w");
|
||||
FILE* fp = fopen(full_path.we_wordv[0], "w");
|
||||
if (fp == NULL) {
|
||||
fprintf(stderr, "ERROR: failed to open file: %s\n", full_path.we_wordv[0]);
|
||||
wordfree(&full_path);
|
||||
|
@ -343,327 +413,255 @@ int shellDumpResult(TAOS *con, char *fname, int *error_no, bool printMode) {
|
|||
}
|
||||
|
||||
wordfree(&full_path);
|
||||
|
||||
int num_fields = taos_num_fields(result);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
int precision = taos_result_precision(result);
|
||||
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
if (col > 0) {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "%s", fields[col].name);
|
||||
}
|
||||
fputc('\n', fp);
|
||||
|
||||
int numOfRows = 0;
|
||||
do {
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
if (i > 0) {
|
||||
fputc(',', fp);
|
||||
}
|
||||
dumpFieldToFile(fp, row[i], fields +i, length[i], precision);
|
||||
}
|
||||
fputc('\n', fp);
|
||||
|
||||
numOfRows++;
|
||||
row = taos_fetch_row(result);
|
||||
} while( row != NULL);
|
||||
|
||||
fclose(fp);
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
|
||||
row = taos_fetch_row(result);
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
static void printField(const char* val, TAOS_FIELD* field, int width, int32_t length, int precision) {
|
||||
if (val == NULL) {
|
||||
int w = width;
|
||||
if (field->type < TSDB_DATA_TYPE_TINYINT || field->type > TSDB_DATA_TYPE_DOUBLE) {
|
||||
w = 0;
|
||||
}
|
||||
w = printf("%*s", w, TSDB_DATA_NULL_STR);
|
||||
for (; w < width; w++) {
|
||||
putchar(' ');
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
char t_str[TSDB_MAX_BYTES_PER_ROW] = "\0";
|
||||
int l[TSDB_MAX_COLUMNS] = {0};
|
||||
int maxLenColumnName = 0;
|
||||
|
||||
if (row) {
|
||||
// Print the header indicator
|
||||
if (fname == NULL) { // print to standard output
|
||||
if (!printMode) {
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
switch (fields[col].type) {
|
||||
char buf[TSDB_MAX_BYTES_PER_ROW];
|
||||
switch (field->type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
l[col] = MAX(BOOL_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*s", width, ((((int)(*((char *)val))) == 1) ? "true" : "false"));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
l[col] = MAX(TINYINT_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*d", width, (int)(*((char *)val)));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
l[col] = MAX(SMALLINT_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*d", width, (int)(*((short *)val)));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
l[col] = MAX(INT_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*d", width, *((int *)val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
l[col] = MAX(BIGINT_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*" PRId64, width, *((int64_t *)val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
l[col] = MAX(FLOAT_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*.5f", width, GET_FLOAT_VAL(val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
l[col] = MAX(DOUBLE_OUTPUT_LENGTH, strlen(fields[col].name));
|
||||
printf("%*.9f", width, GET_DOUBLE_VAL(val));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
l[col] = MAX(fields[col].bytes, strlen(fields[col].name));
|
||||
/* l[col] = max(BINARY_OUTPUT_LENGTH, strlen(fields[col].name)); */
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP: {
|
||||
int32_t defaultWidth = TIMESTAMP_OUTPUT_LENGTH;
|
||||
if (args.is_raw_time) {
|
||||
defaultWidth = 14;
|
||||
}
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
defaultWidth += 3;
|
||||
}
|
||||
l[col] = MAX(defaultWidth, strlen(fields[col].name));
|
||||
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
int spaces = (int)(l[col] - strlen(fields[col].name));
|
||||
int left_space = spaces / 2;
|
||||
int right_space = (spaces % 2 ? left_space + 1 : left_space);
|
||||
printf("%*.s%s%*.s|", left_space, " ", fields[col].name, right_space, " ");
|
||||
output_bytes += (l[col] + 1);
|
||||
}
|
||||
printf("\n");
|
||||
for (int k = 0; k < output_bytes; k++) printf("=");
|
||||
printf("\n");
|
||||
} else {
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
if (strlen(fields[col].name) > maxLenColumnName) maxLenColumnName = strlen(fields[col].name);
|
||||
}
|
||||
}
|
||||
|
||||
// print the elements
|
||||
do {
|
||||
if (!printMode) {
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
if (row[i] == NULL) {
|
||||
printf("%*s|", l[i], TSDB_DATA_NULL_STR);
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (fields[i].type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
printf("%*s|", l[i], ((((int)(*((char *)row[i]))) == 1) ? "true" : "false"));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
printf("%*d|", l[i], (int)(*((char *)row[i])));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
printf("%*d|", l[i], (int)(*((short *)row[i])));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
printf("%*d|", l[i], *((int *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
printf("%*" PRId64 "|", l[i], *((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
printf("%*.5f|", l[i], fv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
printf("%*.9f|", l[i], dv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW);
|
||||
memcpy(t_str, row[i], length[i]);
|
||||
/* printf("%-*s|",max(fields[i].bytes, strlen(fields[i].name)),
|
||||
* t_str); */
|
||||
/* printf("%-*s|", l[i], t_str); */
|
||||
shellPrintNChar(t_str, l[i], printMode);
|
||||
shellPrintNChar(val, length, width);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
if (args.is_raw_time) {
|
||||
printf(" %" PRId64 "|", *(int64_t *)row[i]);
|
||||
} else {
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
tt = (time_t)((*(int64_t *)row[i]) / 1000000);
|
||||
} else {
|
||||
tt = (time_t)((*(int64_t *)row[i]) / 1000);
|
||||
}
|
||||
|
||||
ptm = localtime(&tt);
|
||||
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
|
||||
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
printf(" %s.%06d|", buf, (int)(*(int64_t *)row[i] % 1000000));
|
||||
} else {
|
||||
printf(" %s.%03d|", buf, (int)(*(int64_t *)row[i] % 1000));
|
||||
}
|
||||
}
|
||||
formatTimestamp(buf, *(int64_t*)val, precision);
|
||||
printf("%s", buf);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
} else {
|
||||
|
||||
|
||||
static int verticalPrintResult(TAOS_RES* result) {
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
if (row == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int num_fields = taos_num_fields(result);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
int precision = taos_result_precision(result);
|
||||
|
||||
int maxColNameLen = 0;
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
int len = strlen(fields[col].name);
|
||||
if (len > maxColNameLen) {
|
||||
maxColNameLen = len;
|
||||
}
|
||||
}
|
||||
|
||||
int numOfRows = 0;
|
||||
do {
|
||||
printf("*************************** %d.row ***************************\n", numOfRows + 1);
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
// 1. print column name
|
||||
int left_space = (int)(maxLenColumnName - strlen(fields[i].name));
|
||||
printf("%*.s%s: ", left_space, " ", fields[i].name);
|
||||
TAOS_FIELD* field = fields + i;
|
||||
|
||||
// 2. print column value
|
||||
if (row[i] == NULL) {
|
||||
printf("%s\n", TSDB_DATA_NULL_STR);
|
||||
continue;
|
||||
}
|
||||
int padding = (int)(maxColNameLen - strlen(field->name));
|
||||
printf("%*.s%s: ", padding, " ", field->name);
|
||||
|
||||
switch (fields[i].type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
printf("%s\n", ((((int)(*((char *)row[i]))) == 1) ? "true" : "false"));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
printf("%d\n", (int)(*((char *)row[i])));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
printf("%d\n", (int)(*((short *)row[i])));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
printf("%d\n", *((int *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
printf("%" PRId64 "\n", *((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
printf("%.5f\n", fv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
printf("%.9f\n", dv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW);
|
||||
memcpy(t_str, row[i], length[i]);
|
||||
|
||||
l[i] = MAX(fields[i].bytes, strlen(fields[i].name));
|
||||
shellPrintNChar(t_str, l[i], printMode);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
if (args.is_raw_time) {
|
||||
printf("%" PRId64 "\n", *(int64_t *)row[i]);
|
||||
} else {
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
tt = (time_t)((*(int64_t *)row[i]) / 1000000);
|
||||
} else {
|
||||
tt = (time_t)((*(int64_t *)row[i]) / 1000);
|
||||
}
|
||||
|
||||
ptm = localtime(&tt);
|
||||
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
|
||||
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
printf("%s.%06d\n", buf, (int)(*(int64_t *)row[i] % 1000000));
|
||||
} else {
|
||||
printf("%s.%03d\n", buf, (int)(*(int64_t *)row[i] % 1000));
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
printField(row[i], field, 0, length[i], precision);
|
||||
putchar('\n');
|
||||
}
|
||||
|
||||
numOfRows++;
|
||||
} while ((row = taos_fetch_row(result)));
|
||||
|
||||
} else { // dump to file
|
||||
// first write the column names
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
fprintf(fp, "%s", fields[col].name);
|
||||
if (col < num_fields - 1) {
|
||||
fprintf(fp, ",");
|
||||
} else {
|
||||
fprintf(fp, "\n");
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
if (row[i]) {
|
||||
switch (fields[i].type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
fprintf(fp, "%d", ((((int)(*((char *)row[i]))) == 1) ? 1 : 0));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
fprintf(fp, "%d", (int)(*((char *)row[i])));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
fprintf(fp, "%d", (int)(*((short *)row[i])));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
fprintf(fp, "%d", *((int *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
fprintf(fp, "%" PRId64, *((int64_t *)row[i]));
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
fprintf(fp, "%.5f", fv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
fprintf(fp, "%.9f", dv);
|
||||
}
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW);
|
||||
memcpy(t_str, row[i], length[i]);
|
||||
fprintf(fp, "\'%s\'", t_str);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
if (args.is_raw_time) {
|
||||
fprintf(fp, "%" PRId64, *(int64_t *)row[i]);
|
||||
} else {
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
tt = (time_t)((*(int64_t *)row[i]) / 1000000);
|
||||
} else {
|
||||
tt = (time_t)((*(int64_t *)row[i]) / 1000);
|
||||
}
|
||||
|
||||
ptm = localtime(&tt);
|
||||
strftime(buf, 64, "%Y-%m-%d %H:%M:%S", ptm);
|
||||
|
||||
if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) {
|
||||
fprintf(fp, "\'%s.%06d\'", buf, (int)(*(int64_t *)row[i] % 1000000));
|
||||
} else {
|
||||
fprintf(fp, "\'%s.%03d\'", buf, (int)(*(int64_t *)row[i] % 1000));
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
fprintf(fp, "%s", TSDB_DATA_NULL_STR);
|
||||
}
|
||||
if (i < num_fields - 1) {
|
||||
fprintf(fp, ",");
|
||||
} else {
|
||||
fprintf(fp, "\n");
|
||||
}
|
||||
}
|
||||
|
||||
numOfRows++;
|
||||
} while ((row = taos_fetch_row(result)));
|
||||
}
|
||||
}
|
||||
|
||||
*error_no = taos_errno(con);
|
||||
|
||||
taos_free_result(result);
|
||||
result = NULL;
|
||||
|
||||
if (fname != NULL) {
|
||||
fclose(fp);
|
||||
}
|
||||
row = taos_fetch_row(result);
|
||||
} while(row != NULL);
|
||||
|
||||
return numOfRows;
|
||||
}
|
||||
|
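The refactored print path above calls formatTimestamp(buf, *(int64_t*)val, precision), whose definition is not part of this hunk. Below is only a sketch of what such a helper might look like, assuming the same millisecond/microsecond handling as the inline localtime()/strftime() code it replaces; the name formatTimestampSketch and the fixed buffer size are illustrative assumptions.

static void formatTimestampSketch(char *buf, int64_t val, int precision) {
  time_t tt;
  if (precision == TSDB_TIME_PRECISION_MICRO) {
    tt = (time_t)(val / 1000000);     // microsecond timestamps carry 6 fractional digits
  } else {
    tt = (time_t)(val / 1000);        // millisecond timestamps carry 3 fractional digits
  }

  struct tm *ptm = localtime(&tt);
  size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);

  if (precision == TSDB_TIME_PRECISION_MICRO) {
    sprintf(buf + pos, ".%06d", (int)(val % 1000000));
  } else {
    sprintf(buf + pos, ".%03d", (int)(val % 1000));
  }
}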
||||
|
||||
static int calcColWidth(TAOS_FIELD* field, int precision) {
|
||||
int width = strlen(field->name);
|
||||
|
||||
switch (field->type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
return MAX(5, width); // 'false'
|
||||
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
return MAX(4, width); // '-127'
|
||||
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
return MAX(6, width); // '-32767'
|
||||
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
return MAX(11, width); // '-2147483648'
|
||||
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
return MAX(21, width); // '-9223372036854775807'
|
||||
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return MAX(20, width);
|
||||
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
return MAX(25, width);
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
if (field->bytes > tsMaxBinaryDisplayWidth) {
|
||||
return MAX(tsMaxBinaryDisplayWidth, width);
|
||||
} else {
|
||||
return MAX(field->bytes, width);
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
if (args.is_raw_time) {
|
||||
return MAX(14, width);
|
||||
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
return MAX(26, width); // '2020-01-01 00:00:00.000000'
|
||||
} else {
|
||||
return MAX(23, width); // '2020-01-01 00:00:00.000'
|
||||
}
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
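The widths returned by calcColWidth() are consumed through printf's '*' field-width specifier in printHeader() and printField(). A tiny standalone illustration of that mechanism (the values are made up):

#include <stdio.h>

int main(void) {
  printf("[%*d]\n", 11, 42);        // right-aligns 42 in an 11-column field: [         42]
  printf("[%*s]\n", 5, "NULL");     // same star-width padding used for TSDB_DATA_NULL_STR
  return 0;
}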
||||
|
||||
static void printHeader(TAOS_FIELD* fields, int* width, int num_fields) {
|
||||
int rowWidth = 0;
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
TAOS_FIELD* field = fields + col;
|
||||
int padding = (int)(width[col] - strlen(field->name));
|
||||
int left = padding / 2;
|
||||
printf(" %*.s%s%*.s |", left, " ", field->name, padding - left, " ");
|
||||
rowWidth += width[col] + 3;
|
||||
}
|
||||
|
||||
putchar('\n');
|
||||
for (int i = 0; i < rowWidth; i++) {
|
||||
putchar('=');
|
||||
}
|
||||
putchar('\n');
|
||||
}
|
||||
|
||||
|
||||
static int horizontalPrintResult(TAOS_RES* result) {
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
if (row == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int num_fields = taos_num_fields(result);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
int precision = taos_result_precision(result);
|
||||
|
||||
int width[TSDB_MAX_COLUMNS];
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
width[col] = calcColWidth(fields + col, precision);
|
||||
}
|
||||
|
||||
printHeader(fields, width, num_fields);
|
||||
|
||||
int numOfRows = 0;
|
||||
do {
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
putchar(' ');
|
||||
printField(row[i], fields + i, width[i], length[i], precision);
|
||||
putchar(' ');
|
||||
putchar('|');
|
||||
}
|
||||
putchar('\n');
|
||||
numOfRows++;
|
||||
row = taos_fetch_row(result);
|
||||
} while(row != NULL);
|
||||
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
|
||||
int shellDumpResult(TAOS *con, char *fname, int *error_no, bool vertical) {
|
||||
int numOfRows = 0;
|
||||
|
||||
TAOS_RES* result = taos_use_result(con);
|
||||
if (result == NULL) {
|
||||
taos_error(con);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (fname != NULL) {
|
||||
numOfRows = dumpResultToFile(fname, result);
|
||||
} else if(vertical) {
|
||||
numOfRows = verticalPrintResult(result);
|
||||
} else {
|
||||
numOfRows = horizontalPrintResult(result);
|
||||
}
|
||||
|
||||
*error_no = taos_errno(con);
|
||||
taos_free_result(result);
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
|
||||
void read_history() {
|
||||
// Initialize history
|
||||
memset(history.hist, 0, sizeof(char *) * MAX_HISTORY_SIZE);
|
||||
|
|
|
@ -329,34 +329,27 @@ void *shellLoopQuery(void *arg) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void shellPrintNChar(char *str, int width, bool printMode) {
|
||||
int col_left = width;
|
||||
void shellPrintNChar(const char *str, int length, int width) {
|
||||
int pos = 0, cols = 0;
|
||||
while (pos < length) {
|
||||
wchar_t wc;
|
||||
while (col_left > 0) {
|
||||
if (*str == '\0') break;
|
||||
char *tstr = str;
|
||||
int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX);
|
||||
if (byte_width <= 0) break;
|
||||
int col_width = wcwidth(wc);
|
||||
if (col_width <= 0) {
|
||||
str += byte_width;
|
||||
continue;
|
||||
pos += mbtowc(&wc, str + pos, MB_CUR_MAX);
|
||||
if (pos > length) {
|
||||
break;
|
||||
}
|
||||
|
||||
int w = wcwidth(wc);
|
||||
if (w > 0) {
|
||||
if (width > 0 && cols + w > width) {
|
||||
break;
|
||||
}
|
||||
if (col_left < col_width) break;
|
||||
printf("%lc", wc);
|
||||
str += byte_width;
|
||||
col_left -= col_width;
|
||||
cols += w;
|
||||
}
|
||||
}
|
||||
|
||||
while (col_left > 0) {
|
||||
printf(" ");
|
||||
col_left--;
|
||||
}
|
||||
|
||||
if (!printMode) {
|
||||
printf("|");
|
||||
} else {
|
||||
printf("\n");
|
||||
for (; cols < width; cols++) {
|
||||
putchar(' ');
|
||||
}
|
||||
}
|
||||
|
||||
|
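The reworked shellPrintNChar() above decodes the value one multi-byte character at a time and stops once the accumulated display width would exceed the column width. A small self-contained sketch of the same mbtowc()/wcwidth() clipping idea; the function name and sample string are illustrative assumptions, not part of the shell code.

#define _XOPEN_SOURCE 700            // for wcwidth()
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>

static void printClipped(const char *str, int length, int width) {
  int pos = 0, cols = 0;
  while (pos < length) {
    wchar_t wc;
    int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX);
    if (bytes <= 0) break;           // stop on an invalid or terminating byte
    pos += bytes;
    int w = wcwidth(wc);             // display columns taken by this character
    if (w > 0) {
      if (width > 0 && cols + w > width) break;
      printf("%lc", wc);
      cols += w;
    }
  }
  for (; cols < width; cols++) putchar(' ');   // right-pad to the requested width
}

int main(void) {
  setlocale(LC_ALL, "");             // mbtowc()/wcwidth() follow the user locale
  printClipped("hello", 5, 10);      // hypothetical sample value
  putchar('|');
  putchar('\n');
  return 0;
}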
|
|
@ -20,7 +20,6 @@
|
|||
|
||||
TAOS* con;
|
||||
pthread_t pid;
|
||||
int32_t TIMESTAMP_OUTPUT_LENGTH = 22;
|
||||
|
||||
// TODO: IMPLEMENT INTERRUPT HANDLER.
|
||||
void interruptHandler(int signum) {
|
||||
|
|
|
@ -217,31 +217,31 @@ void *shellLoopQuery(void *arg) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void shellPrintNChar(char *str, int width, bool printMode) {
|
||||
int col_left = width;
|
||||
void shellPrintNChar(const char *str, int length, int width) {
|
||||
int pos = 0, cols = 0;
|
||||
while (pos < length) {
|
||||
wchar_t wc;
|
||||
while (col_left > 0) {
|
||||
if (*str == '\0') break;
|
||||
char *tstr = str;
|
||||
int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX);
|
||||
int col_width = byte_width;
|
||||
if (col_left < col_width) break;
|
||||
int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX);
|
||||
pos += bytes;
|
||||
if (pos > length) {
|
||||
break;
|
||||
}
|
||||
|
||||
int w = bytes;
|
||||
if (w > 0) {
|
||||
if (width > 0 && cols + w > width) {
|
||||
break;
|
||||
}
|
||||
printf("%lc", wc);
|
||||
str += byte_width;
|
||||
col_left -= col_width;
|
||||
cols += w;
|
||||
}
|
||||
}
|
||||
|
||||
while (col_left > 0) {
|
||||
printf(" ");
|
||||
col_left--;
|
||||
for (; cols < width; cols++) {
|
||||
putchar(' ');
|
||||
}
|
||||
}
|
||||
|
||||
if (!printMode) {
|
||||
printf("|");
|
||||
} else {
|
||||
printf("\n");
|
||||
}
|
||||
}
|
||||
|
||||
void get_history_path(char *history) { sprintf(history, "%s/%s", ".", HISTORY_FILE); }
|
||||
|
||||
|
|
|
@ -138,7 +138,7 @@ typedef struct SQuery {
|
|||
SColumnInfo* colList;
|
||||
SColumnInfo* tagColList;
|
||||
int32_t numOfFilterCols;
|
||||
int64_t* defaultVal;
|
||||
int64_t* fillVal;
|
||||
uint32_t status; // query status
|
||||
SResultRec rec;
|
||||
int32_t pos;
|
||||
|
|
|
@ -28,7 +28,7 @@ typedef struct {
|
|||
STColumn col; // column info
|
||||
int16_t functionId; // sql function id
|
||||
int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN
|
||||
union {int64_t i; double d;} defaultVal;
|
||||
union {int64_t i; double d;} fillVal;
|
||||
} SFillColInfo;
|
||||
|
||||
typedef struct SFillInfo {
|
||||
|
@ -75,15 +75,13 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
|
|||
|
||||
TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision);
|
||||
|
||||
int32_t taosGetNumOfResultWithFill(SFillInfo* pFillInfo, int32_t numOfRows, int64_t ekey, int32_t maxNumOfRows);
|
||||
int64_t taosGetNumOfResultWithFill(SFillInfo* pFillInfo, int32_t numOfRows, int64_t ekey, int32_t maxNumOfRows);
|
||||
|
||||
int32_t taosNumOfRemainRows(SFillInfo *pFillInfo);
|
||||
|
||||
int32_t taosDoInterpoResult(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfRows, int32_t outputRows, char** srcData);
|
||||
|
||||
int taosDoLinearInterpolation(int32_t type, SPoint *point1, SPoint *point2, SPoint *point);
|
||||
|
||||
void taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int64_t* outputRows, int32_t capacity);
|
||||
int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -126,7 +126,6 @@ typedef struct SArithmeticSupport {
|
|||
|
||||
typedef struct SQLPreAggVal {
|
||||
bool isSet;
|
||||
int32_t size;
|
||||
SDataStatis statis;
|
||||
} SQLPreAggVal;
|
||||
|
||||
|
@ -174,7 +173,6 @@ typedef struct SQLFunctionCtx {
|
|||
int16_t outputBytes; // size of results, determined by function and input column data type
|
||||
bool hasNull; // null value exist in current block
|
||||
int16_t functionId; // function id
|
||||
int32_t blockStatus; // Indicate if data is loaded, it is first/last/internal block. Only for file blocks
|
||||
void * aInputElemBuf;
|
||||
char * aOutputBuf; // final result output buffer, point to sdata->data
|
||||
uint8_t currentStage; // record current running step, default: 0
|
||||
|
|
|
@ -110,7 +110,7 @@ static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInf
|
|||
static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
|
||||
static void getNextTimeWindow(SQuery *pQuery, STimeWindow *pTimeWindow);
|
||||
|
||||
static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void *inputData, TSKEY *tsCol, int32_t size,
|
||||
static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* pData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo,
|
||||
int32_t functionId, SDataStatis *pStatis, bool hasNull, void *param, int32_t scanFlag);
|
||||
static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
|
||||
static void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols);
|
||||
|
@ -200,7 +200,7 @@ bool getNeighborPoints(SQInfo *pQInfo, void *pMeterObj, SPointInterpoSupporter *
|
|||
return false;
|
||||
} else { // prev has been located
|
||||
if (pQuery->fileId >= 0) {
|
||||
pQuery->pos = pQuery->pBlock[pQuery->slot].numOfPoints - 1;
|
||||
pQuery->pos = pQuery->pBlock[pQuery->slot].numOfRows - 1;
|
||||
getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos);
|
||||
|
||||
qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery),
|
||||
|
@ -210,11 +210,11 @@ bool getNeighborPoints(SQInfo *pQInfo, void *pMeterObj, SPointInterpoSupporter *
|
|||
assert(vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD);
|
||||
pBlock = &pRuntimeEnv->cacheBlock;
|
||||
|
||||
pQuery->pos = pBlock->numOfPoints - 1;
|
||||
pQuery->pos = pBlock->numOfRows - 1;
|
||||
getOneRowFromDataBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos);
|
||||
|
||||
qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d", GET_QINFO_ADDR(pQuery),
|
||||
pQuery->fileId, pQuery->slot, pBlock->numOfPoints - 1, pQuery->pos);
|
||||
pQuery->fileId, pQuery->slot, pBlock->numOfRows - 1, pQuery->pos);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -376,11 +376,16 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) {
|
|||
|
||||
bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].base.functionId == TSDB_FUNC_TS_COMP; }
|
||||
|
||||
static bool limitResults(SQuery *pQuery) {
|
||||
static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) {
|
||||
SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
|
||||
if ((pQuery->limit.limit > 0) && (pQuery->rec.total + pQuery->rec.rows > pQuery->limit.limit)) {
|
||||
pQuery->rec.rows = pQuery->limit.limit - pQuery->rec.total;
|
||||
assert(pQuery->rec.rows > 0);
|
||||
|
||||
qTrace("QInfo:%p discard remain data due to result limitation, limit:%"PRId64", current return:%d, total:%"PRId64,
|
||||
pQInfo, pQuery->limit.limit, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
|
||||
assert(pQuery->rec.rows >= 0);
|
||||
setQueryStatus(pQuery, QUERY_COMPLETED);
|
||||
return true;
|
||||
}
|
||||
|
@ -403,19 +408,20 @@ static bool isTopBottomQuery(SQuery *pQuery) {
|
|||
return false;
|
||||
}
|
||||
|
||||
static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, SDataBlockInfo *pDataBlockInfo, int32_t index) {
|
||||
static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) {
|
||||
// for a tag column, no corresponding field info
|
||||
SColIndex *pColIndexEx = &pQuery->pSelectExpr[index].base.colInfo;
|
||||
if (TSDB_COL_IS_TAG(pColIndexEx->flag)) {
|
||||
SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo;
|
||||
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Choose the right column field info by field id, since the file block may be out of date,
|
||||
* which means the newest table schema may not equal the schema of this block.
|
||||
* TODO: speedup by using bsearch
|
||||
*/
|
||||
for (int32_t i = 0; i < pDataBlockInfo->numOfCols; ++i) {
|
||||
if (pColIndexEx->colId == pStatis[i].colId) {
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
if (pColIndex->colId == pStatis[i].colId) {
|
||||
return &pStatis[i];
|
||||
}
|
||||
}
|
||||
|
@ -431,8 +437,7 @@ static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, SDataBlo
|
|||
* @param pColStatis
|
||||
* @return
|
||||
*/
|
||||
static bool hasNullValue(SQuery *pQuery, int32_t col, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis,
|
||||
SDataStatis **pColStatis) {
|
||||
static bool hasNullValue(SQuery *pQuery, int32_t col, int32_t numOfCols, SDataStatis *pStatis, SDataStatis **pColStatis) {
|
||||
SColIndex *pColIndex = &pQuery->pSelectExpr[col].base.colInfo;
|
||||
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||
return false;
|
||||
|
@ -444,7 +449,7 @@ static bool hasNullValue(SQuery *pQuery, int32_t col, SDataBlockInfo *pDataBlock
|
|||
}
|
||||
|
||||
if (pStatis != NULL) {
|
||||
*pColStatis = getStatisInfo(pQuery, pStatis, pDataBlockInfo, col);
|
||||
*pColStatis = getStatisInfo(pQuery, pStatis, numOfCols, col);
|
||||
} else {
|
||||
*pColStatis = NULL;
|
||||
}
|
||||
|
@ -603,9 +608,9 @@ static SWindowStatus *getTimeWindowResStatus(SWindowResInfo *pWindowResInfo, int
|
|||
return &pWindowResInfo->pResult[slot].status;
|
||||
}
|
||||
|
||||
static int32_t getForwardStepsInBlock(int32_t numOfPoints, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos,
|
||||
static int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos,
|
||||
int16_t order, int64_t *pData) {
|
||||
int32_t endPos = searchFn((char *)pData, numOfPoints, ekey, order);
|
||||
int32_t endPos = searchFn((char *)pData, numOfRows, ekey, order);
|
||||
int32_t forwardStep = 0;
|
||||
|
||||
if (endPos >= 0) {
|
||||
|
@ -624,15 +629,17 @@ static int32_t getForwardStepsInBlock(int32_t numOfPoints, __block_search_fn_t s
|
|||
/**
|
||||
* NOTE: the query status is only set for the first scan of the master scan.
|
||||
*/
|
||||
static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SWindowResInfo *pWindowResInfo) {
|
||||
static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SWindowResInfo *pWindowResInfo) {
|
||||
SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!isIntervalQuery(pQuery))) {
|
||||
return;
|
||||
return pWindowResInfo->size;
|
||||
}
|
||||
|
||||
// no qualified results exist, abort check
|
||||
int32_t numOfClosed = 0;
|
||||
|
||||
if (pWindowResInfo->size == 0) {
|
||||
return;
|
||||
return pWindowResInfo->size;
|
||||
}
|
||||
|
||||
// query completed
|
||||
|
@ -646,10 +653,10 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
|
|||
int32_t i = 0;
|
||||
int64_t skey = TSKEY_INITIAL_VAL;
|
||||
|
||||
// TODO opt performance: get the closed time window here
|
||||
for (i = 0; i < pWindowResInfo->size; ++i) {
|
||||
SWindowResult *pResult = &pWindowResInfo->pResult[i];
|
||||
if (pResult->status.closed) {
|
||||
numOfClosed += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -672,16 +679,26 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
|
|||
|
||||
pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex].window.skey;
|
||||
|
||||
// the number of completed slots are larger than the threshold, dump to client immediately.
|
||||
int32_t n = numOfClosedTimeWindow(pWindowResInfo);
|
||||
if (n > pWindowResInfo->threshold) {
|
||||
// the number of completed slots is larger than the threshold, return the currently generated results to the client.
|
||||
if (numOfClosed > pWindowResInfo->threshold) {
|
||||
qTrace("QInfo:%p total result window:%d closed:%d, reached the output threshold %d, return",
|
||||
GET_QINFO_ADDR(pRuntimeEnv), pWindowResInfo->size, numOfClosed, pQuery->rec.threshold);
|
||||
|
||||
setQueryStatus(pQuery, QUERY_RESBUF_FULL);
|
||||
} else {
|
||||
qTrace("QInfo:%p total result window:%d already closed:%d", GET_QINFO_ADDR(pRuntimeEnv), pWindowResInfo->size,
|
||||
numOfClosed);
|
||||
}
|
||||
}
|
||||
|
||||
qTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pRuntimeEnv), pWindowResInfo->size, n);
|
||||
// output has reached the limitation, set query completed
|
||||
if (pQuery->limit.limit > 0 && (pQuery->limit.limit + pQuery->limit.offset) <= numOfClosed &&
|
||||
pRuntimeEnv->scanFlag == MASTER_SCAN) {
|
||||
setQueryStatus(pQuery, QUERY_COMPLETED);
|
||||
}
|
||||
|
||||
assert(pWindowResInfo->prevSKey != TSKEY_INITIAL_VAL);
|
||||
return numOfClosed;
|
||||
}
|
||||
|
||||
static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo, TSKEY *pPrimaryColumn,
|
||||
|
@ -733,7 +750,7 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
|
|||
}
|
||||
|
||||
static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStatus *pStatus, STimeWindow *pWin,
|
||||
int32_t startPos, int32_t forwardStep, TSKEY *tsBuf) {
|
||||
int32_t offset, int32_t forwardStep, TSKEY *tsBuf, int32_t numOfTotal) {
|
||||
SQuery * pQuery = pRuntimeEnv->pQuery;
|
||||
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
|
||||
|
||||
|
@ -743,10 +760,15 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat
|
|||
|
||||
pCtx[k].nStartQueryTimestamp = pWin->skey;
|
||||
pCtx[k].size = forwardStep;
|
||||
pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? startPos : startPos - (forwardStep - 1);
|
||||
pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
|
||||
|
||||
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
|
||||
pCtx[k].ptsList = &tsBuf[pCtx[k].startOffset];
|
||||
pCtx[k].ptsList = tsBuf;
|
||||
}
|
||||
|
||||
// if the whole block is not involved in query processing, the statistics data cannot be used
|
||||
if (forwardStep != numOfTotal) {
|
||||
pCtx[k].preAggVals.isSet = false;
|
||||
}
|
||||
|
||||
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
|
||||
|
@ -890,7 +912,7 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
|
|||
}
|
||||
|
||||
assert(dataBlock != NULL);
|
||||
sas->data[i] = dataBlock + pCtx->startOffset * pQuery->colList[i].bytes; // start from the offset
|
||||
sas->data[i] = dataBlock/* + pQuery->colList[i].bytes*/; // start from the offset
|
||||
}
|
||||
|
||||
} else { // other type of query function
|
||||
|
@ -933,14 +955,15 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
|
|||
|
||||
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
|
||||
int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
|
||||
int32_t colId = pQuery->pSelectExpr[k].base.colInfo.colId;
|
||||
|
||||
SDataStatis *tpField = NULL;
|
||||
|
||||
bool hasNull = hasNullValue(pQuery, k, pDataBlockInfo, pStatis, &tpField);
|
||||
bool hasNull = hasNullValue(pQuery, k, pDataBlockInfo->numOfCols, pStatis, &tpField);
|
||||
char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock);
|
||||
|
||||
setExecParams(pQuery, &pCtx[k], dataBlock, primaryKeyCol, pDataBlockInfo->rows, functionId, tpField, hasNull,
|
||||
&sasArray[k], pRuntimeEnv->scanFlag);
|
||||
setExecParams(pQuery, &pCtx[k], dataBlock, primaryKeyCol, pDataBlockInfo, functionId, tpField, hasNull,
|
||||
&sasArray[k], colId);
|
||||
}
|
||||
|
||||
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
|
||||
|
@ -958,7 +981,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
|
|||
getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, primaryKeyCol, pQuery->pos, ekey, searchFn, true);
|
||||
|
||||
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
|
||||
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, primaryKeyCol);
|
||||
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, primaryKeyCol, pDataBlockInfo->rows);
|
||||
|
||||
int32_t index = pWindowResInfo->curIndex;
|
||||
STimeWindow nextWin = win;
|
||||
|
@ -978,7 +1001,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
|
|||
forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, primaryKeyCol, startPos, ekey, searchFn, true);
|
||||
|
||||
pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
|
||||
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, startPos, forwardStep, primaryKeyCol);
|
||||
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, startPos, forwardStep, primaryKeyCol, pDataBlockInfo->rows);
|
||||
}
|
||||
|
||||
pWindowResInfo->curIndex = index;
|
||||
|
@ -1154,14 +1177,15 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
|
|||
|
||||
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
|
||||
int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
|
||||
int32_t colId = pQuery->pSelectExpr[k].base.colInfo.colId;
|
||||
|
||||
SDataStatis *pColStatis = NULL;
|
||||
|
||||
bool hasNull = hasNullValue(pQuery, k, pDataBlockInfo, pStatis, &pColStatis);
|
||||
bool hasNull = hasNullValue(pQuery, k, pDataBlockInfo->numOfCols, pStatis, &pColStatis);
|
||||
char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock);
|
||||
|
||||
setExecParams(pQuery, &pCtx[k], dataBlock, primaryKeyCol, pDataBlockInfo->rows, functionId, pColStatis, hasNull,
|
||||
&sasArray[k], pRuntimeEnv->scanFlag);
|
||||
setExecParams(pQuery, &pCtx[k], dataBlock, primaryKeyCol, pDataBlockInfo, functionId, pColStatis, hasNull,
|
||||
&sasArray[k], colId);
|
||||
}
|
||||
|
||||
// set the input column data
|
||||
|
@ -1214,7 +1238,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
|
|||
}
|
||||
|
||||
// all startOffset are identical
|
||||
offset -= pCtx[0].startOffset;
|
||||
// offset -= pCtx[0].startOffset;
|
||||
|
||||
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
|
||||
doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &win, offset);
|
||||
|
@ -1255,9 +1279,6 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
|
|||
}
|
||||
}
|
||||
|
||||
// all startOffset are identical
|
||||
offset -= pCtx[0].startOffset;
|
||||
|
||||
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
|
||||
int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
|
||||
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
|
||||
|
@ -1305,16 +1326,14 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
|
|||
TSKEY lastKey = QUERY_IS_ASC_QUERY(pQuery) ? pDataBlockInfo->window.ekey : pDataBlockInfo->window.skey;
|
||||
pTableQInfo->lastKey = lastKey + GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
|
||||
|
||||
doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo);
|
||||
|
||||
// interval query with limit applied
|
||||
if (isIntervalQuery(pQuery) && pQuery->limit.limit > 0 &&
|
||||
(pQuery->limit.limit + pQuery->limit.offset) <= numOfClosedTimeWindow(pWindowResInfo) &&
|
||||
pRuntimeEnv->scanFlag == MASTER_SCAN) {
|
||||
setQueryStatus(pQuery, QUERY_COMPLETED);
|
||||
}
|
||||
int32_t numOfRes = 0;
|
||||
|
||||
int32_t numOfRes = getNumOfResult(pRuntimeEnv);
|
||||
if (isIntervalQuery(pQuery)) {
|
||||
numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo);
|
||||
} else {
|
||||
numOfRes = getNumOfResult(pRuntimeEnv);
|
||||
|
||||
// update the number of output result
|
||||
if (numOfRes > 0 && pQuery->checkBuffer == 1) {
|
||||
|
@ -1325,35 +1344,37 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
|
|||
setQueryStatus(pQuery, QUERY_RESBUF_FULL);
|
||||
}
|
||||
|
||||
if ((pQuery->limit.limit >= 0) && numOfRes >= (pQuery->limit.limit + pQuery->limit.offset)) {
|
||||
if ((pQuery->limit.limit >= 0) && (pQuery->limit.limit + pQuery->limit.offset) <= numOfRes) {
|
||||
setQueryStatus(pQuery, QUERY_COMPLETED);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return numOfRes;
|
||||
}
|
||||
|
||||
void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void *inputData, TSKEY *tsCol, int32_t size,
|
||||
int32_t functionId, SDataStatis *pStatis, bool hasNull, void *param, int32_t scanFlag) {
|
||||
pCtx->scanFlag = scanFlag;
|
||||
|
||||
pCtx->aInputElemBuf = inputData;
|
||||
void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo,
|
||||
int32_t functionId, SDataStatis *pStatis, bool hasNull, void *param, int32_t colId) {
|
||||
pCtx->hasNull = hasNull;
|
||||
pCtx->aInputElemBuf = inputData;
|
||||
|
||||
if (pStatis != NULL) {
|
||||
pCtx->preAggVals.isSet = true;
|
||||
pCtx->preAggVals.size = size;
|
||||
pCtx->preAggVals.statis = *pStatis;
|
||||
if (pCtx->preAggVals.statis.numOfNull == -1) {
|
||||
pCtx->preAggVals.statis.numOfNull = pBlockInfo->rows; // todo :can not be -1
|
||||
}
|
||||
} else {
|
||||
pCtx->preAggVals.isSet = false;
|
||||
}
|
||||
|
||||
// limit/offset query will affect this value
|
||||
pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos:0;
|
||||
pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? size - pQuery->pos : pQuery->pos + 1;
|
||||
pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1;
|
||||
|
||||
uint32_t status = aAggs[functionId].nStatus;
|
||||
if (((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) && (tsCol != NULL)) {
|
||||
pCtx->ptsList = &tsCol[pCtx->startOffset];
|
||||
pCtx->ptsList = tsCol;
|
||||
}
|
||||
|
||||
if (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) {
|
||||
|
@ -1377,6 +1398,12 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void *inputData, TSKEY
|
|||
|
||||
} else if (functionId == TSDB_FUNC_ARITHM) {
|
||||
pCtx->param[1].pz = param;
|
||||
} else if (functionId == TSDB_FUNC_SPREAD) { // set the statistics data for primary time stamp column
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
pCtx->preAggVals.isSet = true;
|
||||
pCtx->preAggVals.statis.min = pBlockInfo->window.skey;
|
||||
pCtx->preAggVals.statis.max = pBlockInfo->window.ekey;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(_DEBUG_VIEW)
|
||||
|
@ -2015,10 +2042,10 @@ void pointInterpSupporterSetData(SQInfo *pQInfo, SPointInterpoSupporter *pPointI
|
|||
|
||||
tVariantCreateFromBinary(&pCtx->param[3], (char *)&count, sizeof(count), TSDB_DATA_TYPE_INT);
|
||||
|
||||
if (isNull((char *)&pQuery->defaultVal[i], pCtx->inputType)) {
|
||||
if (isNull((char *)&pQuery->fillVal[i], pCtx->inputType)) {
|
||||
pCtx->param[1].nType = TSDB_DATA_TYPE_NULL;
|
||||
} else {
|
||||
tVariantCreateFromBinary(&pCtx->param[1], (char *)&pQuery->defaultVal[i], pCtx->inputBytes, pCtx->inputType);
|
||||
tVariantCreateFromBinary(&pCtx->param[1], (char *)&pQuery->fillVal[i], pCtx->inputBytes, pCtx->inputType);
|
||||
}
|
||||
|
||||
pInterpDetail->ts = pQuery->window.skey;
|
||||
|
@ -2318,7 +2345,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
|
|||
|
||||
int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
|
||||
int32_t midPos = -1;
|
||||
int32_t numOfPoints;
|
||||
int32_t numOfRows;
|
||||
|
||||
if (num <= 0) {
|
||||
return -1;
|
||||
|
@ -2337,8 +2364,8 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
|
|||
if (key == keyList[firstPos]) return firstPos;
|
||||
if (key < keyList[firstPos]) return firstPos - 1;
|
||||
|
||||
numOfPoints = lastPos - firstPos + 1;
|
||||
midPos = (numOfPoints >> 1) + firstPos;
|
||||
numOfRows = lastPos - firstPos + 1;
|
||||
midPos = (numOfRows >> 1) + firstPos;
|
||||
|
||||
if (key < keyList[midPos]) {
|
||||
lastPos = midPos - 1;
|
||||
|
@ -2363,8 +2390,8 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
|
|||
return lastPos;
|
||||
}
|
||||
|
||||
numOfPoints = lastPos - firstPos + 1;
|
||||
midPos = (numOfPoints >> 1) + firstPos;
|
||||
numOfRows = lastPos - firstPos + 1;
|
||||
midPos = (numOfRows >> 1) + firstPos;
|
||||
|
||||
if (key < keyList[midPos]) {
|
||||
lastPos = midPos - 1;
|
||||
|
@ -2455,13 +2482,13 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
|
|||
}
|
||||
|
||||
SDataStatis *pStatis = NULL;
|
||||
SArray * pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
|
||||
|
||||
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : blockInfo.rows - 1;
|
||||
|
||||
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
|
||||
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
|
||||
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, numOfRes:%d", GET_QINFO_ADDR(pRuntimeEnv),
|
||||
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes);
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
|
||||
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey);
|
||||
|
||||
// while the output buffer is full or limit/offset is applied, query may be paused here
|
||||
if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL | QUERY_COMPLETED)) {
|
||||
|
@ -3363,7 +3390,9 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus
|
|||
// during reverse scan
|
||||
pTableQueryInfo->lastKey = pStatus->lastKey;
|
||||
pQuery->status = pStatus->status;
|
||||
|
||||
pTableQueryInfo->win = pStatus->w;
|
||||
pQuery->window = pTableQueryInfo->win;
|
||||
}
|
||||
|
||||
void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
|
||||
|
@ -3385,6 +3414,7 @@ void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
|
|||
if (pRuntimeEnv->scanFlag == MASTER_SCAN) {
|
||||
qstatus.status = pQuery->status;
|
||||
qstatus.curWindow.ekey = pTableQueryInfo->lastKey - step;
|
||||
qstatus.lastKey = pTableQueryInfo->lastKey;
|
||||
}
|
||||
|
||||
if (!needScanDataBlocksAgain(pRuntimeEnv)) {
|
||||
|
@ -3413,6 +3443,9 @@ void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
|
|||
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
|
||||
pRuntimeEnv->scanFlag = REPEAT_SCAN;
|
||||
|
||||
qTrace("QInfo:%p start to repeat scan data blocks due to query func required, qrange:%"PRId64"-%"PRId64, pQInfo,
|
||||
cond.twindow.skey, cond.twindow.ekey);
|
||||
|
||||
// check if query is killed or not
|
||||
if (isQueryKilled(pQInfo)) {
|
||||
return;
|
||||
|
@ -3633,12 +3666,14 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
|
|||
STimeWindow w = {0};
|
||||
SWindowResInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo;
|
||||
|
||||
getAlignQueryTimeWindow(pQuery, win.skey, win.skey, win.ekey, &skey1, &ekey1, &w);
|
||||
TSKEY sk = MIN(win.skey, win.ekey);
|
||||
TSKEY ek = MAX(win.skey, win.ekey);
|
||||
getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &skey1, &ekey1, &w);
|
||||
pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp
|
||||
|
||||
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
|
||||
if (!QUERY_IS_ASC_QUERY(pQuery)) {
|
||||
assert(win.ekey == pQuery->window.skey);
|
||||
assert(win.ekey == pQuery->window.ekey);
|
||||
}
|
||||
|
||||
pWindowResInfo->prevSKey = w.skey;
|
||||
|
@ -3673,10 +3708,6 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo) {
|
|||
return loadPrimaryTS;
|
||||
}
|
||||
|
||||
bool onDemandLoadDatablock(SQuery *pQuery, int16_t queryRangeSet) {
|
||||
return (pQuery->intervalTime == 0) || ((queryRangeSet == 1) && (isIntervalQuery(pQuery)));
|
||||
}
|
||||
|
||||
static int32_t getNumOfSubset(SQInfo *pQInfo) {
|
||||
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
|
||||
|
@ -3698,7 +3729,7 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResult *result, int32_t orde
|
|||
int32_t startIdx = 0;
|
||||
int32_t step = -1;
|
||||
|
||||
qTrace("QInfo:%p start to copy data from windowResInfo to query buf", GET_QINFO_ADDR(pQuery));
|
||||
qTrace("QInfo:%p start to copy data from windowResInfo to query buf", pQInfo);
|
||||
int32_t totalSubset = getNumOfSubset(pQInfo);
|
||||
|
||||
if (orderType == TSDB_ORDER_ASC) {
|
||||
|
@ -3827,7 +3858,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) {
|
|||
}
|
||||
|
||||
/*
|
||||
* There are no results returned to client now.
|
||||
* When the code reaches here, no results have been returned to the client yet.
|
||||
* If query is not completed yet, the gaps between two results blocks need to be handled after next data block
|
||||
* is retrieved from TSDB.
|
||||
*
|
||||
|
@ -3881,18 +3912,24 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
|
|||
}
|
||||
|
||||
int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t numOfRows, int32_t *numOfInterpo) {
|
||||
SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
|
||||
SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo;
|
||||
|
||||
while (1) {
|
||||
taosGenerateDataBlock(pRuntimeEnv->pFillInfo, (tFilePage**) pQuery->sdata, &pQuery->rec.rows, pQuery->rec.capacity);
|
||||
int32_t ret = pQuery->rec.rows;
|
||||
int32_t ret = taosGenerateDataBlock(pFillInfo, (tFilePage**) pQuery->sdata, pQuery->rec.capacity);
|
||||
|
||||
// todo apply limit output function
|
||||
/* reached the start position of according to offset value, return immediately */
|
||||
if (pQuery->limit.offset == 0) {
|
||||
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows", pQInfo, pFillInfo->numOfRows, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (pQuery->limit.offset < ret) {
|
||||
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows, offset:%d. Discard due to offset, remain:%d, new offset:%d",
|
||||
pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, ret - pQuery->limit.offset, 0);
|
||||
|
||||
ret -= pQuery->limit.offset;
|
||||
// todo: the exact number of interpolated rows here is not valid.
|
||||
// todo refactor move to the beginning of buffer
|
||||
|
@ -3900,10 +3937,16 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
|
|||
memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].bytes * pQuery->limit.offset,
|
||||
ret * pQuery->pSelectExpr[i].bytes);
|
||||
}
|
||||
|
||||
pQuery->limit.offset = 0;
|
||||
return ret;
|
||||
} else {
|
||||
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows, offset:%d. Discard due to offset, "
|
||||
"remain:%d, new offset:%d", pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, 0,
|
||||
pQuery->limit.offset - ret);
|
||||
|
||||
pQuery->limit.offset -= ret;
|
||||
pQuery->rec.rows = 0;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
|
@ -3911,8 +3954,6 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
|
|||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
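In doFillGapsInResults() above, the remaining limit.offset is applied by discarding the first rows of every output column with a memmove. A simplified sketch of that step, assuming plain column-major buffers rather than the actual SQuery/tFilePage structures:

#include <string.h>

// colData[i] holds totalRows fixed-size cells of cellBytes[i] bytes each; drop the first
// `offset` rows by sliding the remaining rows to the head of each column buffer.
static void discardLeadingRows(char **colData, const int *cellBytes, int numOfCols,
                               int totalRows, int offset) {
  int remain = totalRows - offset;
  for (int i = 0; i < numOfCols; ++i) {
    memmove(colData[i], colData[i] + (size_t)cellBytes[i] * offset,
            (size_t)cellBytes[i] * remain);
  }
}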
||||
void vnodePrintQueryStatistics(SQInfo *pQInfo) {
|
||||
|
@ -3993,8 +4034,8 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBloc
|
|||
|
||||
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock);
|
||||
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, numOfRes:%d", GET_QINFO_ADDR(pRuntimeEnv),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes);
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pQuery->current->lastKey);
|
||||
}
|
||||
|
||||
void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
|
||||
|
@ -4111,8 +4152,9 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
|
|||
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock);
|
||||
pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index
|
||||
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, numOfRes:%d",
|
||||
GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes);
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64,
|
||||
GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey);
|
||||
|
||||
return true;
|
||||
} else { // do nothing
|
||||
*start = tw.skey;
|
||||
|
@ -4206,7 +4248,7 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) {
|
|||
pFillCol[i].col.offset = offset;
|
||||
pFillCol[i].flag = TSDB_COL_NORMAL; // always a normal column for a table query
|
||||
pFillCol[i].functionId = pExprInfo->base.functionId;
|
||||
pFillCol[i].defaultVal.i = pQuery->defaultVal[i];
|
||||
pFillCol[i].fillVal.i = pQuery->fillVal[i];
|
||||
|
||||
offset += pExprInfo->bytes;
|
||||
}
|
||||
|
@ -4582,7 +4624,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
|
|||
skipResults(pRuntimeEnv);
|
||||
|
||||
// the limitation of output result is reached, set the query completed
|
||||
if (limitResults(pQuery)) {
|
||||
if (limitResults(pRuntimeEnv)) {
|
||||
pQInfo->tableIndex = pQInfo->groupInfo.numOfTables;
|
||||
break;
|
||||
}
|
||||
|
@ -4837,7 +4879,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
|
|||
pQuery->rec.rows = getNumOfResult(pRuntimeEnv);
|
||||
|
||||
skipResults(pRuntimeEnv);
|
||||
limitResults(pQuery);
|
||||
limitResults(pRuntimeEnv);
|
||||
}
|
||||
|
||||
static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
|
||||
|
@ -4885,7 +4927,7 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
|
|||
resetCtxOutputBuf(pRuntimeEnv);
|
||||
}
|
||||
|
||||
limitResults(pQuery);
|
||||
limitResults(pRuntimeEnv);
|
||||
if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) {
|
||||
qTrace("QInfo:%p query paused due to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo,
|
||||
pQuery->current->lastKey, pQuery->window.ekey);
|
||||
|
@ -4963,7 +5005,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
|
|||
|
||||
// the offset is handled at prepare stage if no interpolation involved
|
||||
if (pQuery->fillType == TSDB_FILL_NONE || pQuery->rec.rows == 0) {
|
||||
limitResults(pQuery);
|
||||
limitResults(pRuntimeEnv);
|
||||
break;
|
||||
} else {
|
||||
TSKEY ekey = taosGetRevisedEndKey(pQuery->window.ekey, pQuery->order.order, pQuery->slidingTime,
|
||||
|
@ -4971,11 +5013,10 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
|
|||
taosFillSetStartInfo(pRuntimeEnv->pFillInfo, pQuery->rec.rows, ekey);
|
||||
taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (tFilePage**) pQuery->sdata);
|
||||
numOfInterpo = 0;
|
||||
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, pQuery->rec.rows, &numOfInterpo);
|
||||
|
||||
qTrace("QInfo: %p fill results completed, final:%d", pQInfo, pQuery->rec.rows);
|
||||
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, pQuery->rec.rows, &numOfInterpo);
|
||||
if (pQuery->rec.rows > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
|
||||
limitResults(pQuery);
|
||||
limitResults(pRuntimeEnv);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -5008,9 +5049,8 @@ static void tableQueryImpl(SQInfo *pQInfo) {
|
|||
int32_t remain = taosNumOfRemainRows(pRuntimeEnv->pFillInfo);
|
||||
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, remain, &numOfInterpo);
|
||||
|
||||
qTrace("QInfo: %p fill results completed, final:%d", pQInfo, pQuery->rec.rows);
|
||||
if (pQuery->rec.rows > 0) {
|
||||
limitResults(pQuery);
|
||||
limitResults(pRuntimeEnv);
|
||||
}
|
||||
|
||||
qTrace("QInfo:%p current:%d returned, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total);
|
||||
|
@ -5342,7 +5382,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
|
|||
|
||||
pQueryMsg->fillType = htons(pQueryMsg->fillType);
|
||||
if (pQueryMsg->fillType != TSDB_FILL_NONE) {
|
||||
pQueryMsg->defaultVal = (uint64_t)(pMsg);
|
||||
pQueryMsg->fillVal = (uint64_t)(pMsg);
|
||||
|
||||
int64_t *v = (int64_t *)pMsg;
|
||||
for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) {
|
||||
|
@ -5610,18 +5650,18 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) {
|
|||
continue;
|
||||
}
|
||||
|
||||
SColIndex *pColIndexEx = &pSqlExprMsg->colInfo;
|
||||
if (!TSDB_COL_IS_TAG(pColIndexEx->flag)) {
|
||||
SColIndex *pColIndex = &pSqlExprMsg->colInfo;
|
||||
if (!TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||
for (int32_t f = 0; f < pQuery->numOfCols; ++f) {
|
||||
if (pColIndexEx->colId == pQuery->colList[f].colId) {
|
||||
pColIndexEx->colIndex = f;
|
||||
if (pColIndex->colId == pQuery->colList[f].colId) {
|
||||
pColIndex->colIndex = f;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (int32_t f = 0; f < pQuery->numOfTags; ++f) {
|
||||
if (pColIndexEx->colId == pQuery->tagColList[f].colId) {
|
||||
pColIndexEx->colIndex = f;
|
||||
if (pColIndex->colId == pQuery->tagColList[f].colId) {
|
||||
pColIndex->colIndex = f;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -5713,13 +5753,13 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
|
|||
}
|
||||
|
||||
if (pQuery->fillType != TSDB_FILL_NONE) {
|
||||
pQuery->defaultVal = malloc(sizeof(int64_t) * pQuery->numOfOutput);
|
||||
if (pQuery->defaultVal == NULL) {
|
||||
pQuery->fillVal = malloc(sizeof(int64_t) * pQuery->numOfOutput);
|
||||
if (pQuery->fillVal == NULL) {
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
// the first column is the timestamp
|
||||
memcpy(pQuery->defaultVal, (char *)pQueryMsg->defaultVal, pQuery->numOfOutput * sizeof(int64_t));
|
||||
memcpy(pQuery->fillVal, (char *)pQueryMsg->fillVal, pQuery->numOfOutput * sizeof(int64_t));
|
||||
}
|
||||
|
||||
// to make sure third party won't overwrite this structure
|
||||
|
@ -5745,7 +5785,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
|
|||
SGroupItem item = { .id = id };
|
||||
// NOTE: compare STableIdInfo with STableId
|
||||
// not a problem at present because we only use their 1st int64_t field
|
||||
STableIdInfo* pTableId = taosArraySearch( pTableIdList, compareTableIdInfo, &id );
|
||||
STableIdInfo* pTableId = taosArraySearch( pTableIdList, &id, compareTableIdInfo);
|
||||
if (pTableId != NULL ) {
|
||||
window.skey = pTableId->key;
|
||||
} else {
|
||||
|
@ -5776,7 +5816,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
|
|||
return pQInfo;
|
||||
|
||||
_cleanup:
|
||||
tfree(pQuery->defaultVal);
|
||||
tfree(pQuery->fillVal);
|
||||
|
||||
if (pQuery->sdata != NULL) {
|
||||
for (int16_t col = 0; col < pQuery->numOfOutput; ++col) {
|
||||
|
@ -5884,8 +5924,8 @@ static void freeQInfo(SQInfo *pQInfo) {
|
|||
tfree(pQuery->pSelectExpr);
|
||||
}
|
||||
|
||||
if (pQuery->defaultVal != NULL) {
|
||||
tfree(pQuery->defaultVal);
|
||||
if (pQuery->fillVal != NULL) {
|
||||
tfree(pQuery->fillVal);
|
||||
}
|
||||
|
||||
// todo refactor, extract method to destroytableDataInfo
|
||||
|
@ -5988,7 +6028,12 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
|
|||
}
|
||||
|
||||
pQuery->rec.total += pQuery->rec.rows;
|
||||
qTrace("QInfo:%p current:%d, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total);
|
||||
qTrace("QInfo:%p current numOfRes rows:%d, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total);
|
||||
|
||||
if (pQuery->limit.limit > 0 && pQuery->limit.limit == pQuery->rec.total) {
|
||||
qTrace("QInfo:%p results limitation reached, limitation:%"PRId64, pQInfo, pQuery->limit.limit);
|
||||
setQueryStatus(pQuery, QUERY_OVER);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
|
|
|
@ -773,9 +773,6 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo,
|
|||
|
||||
// todo refactor:
|
||||
tstr *name = ((STableIndexElem *)pData)->pTable->name;
|
||||
// char* name = NULL;
|
||||
// tsdbGetTableName(pQueryInfo->, pTable, &name);
|
||||
|
||||
// todo speed up by using hash
|
||||
if (pQueryInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
if (pQueryInfo->optr == TSDB_RELATION_IN) {
|
||||
|
@ -1051,7 +1048,7 @@ static void* exception_malloc(size_t size) {
|
|||
return p;
|
||||
}
|
||||
|
||||
static char* exception_strdup(const char* str) {
|
||||
static UNUSED_FUNC char* exception_strdup(const char* str) {
|
||||
char* p = strdup(str);
|
||||
if (p == NULL) {
|
||||
THROW(TSDB_CODE_SERV_OUT_OF_MEMORY);
|
||||
|
@ -1154,28 +1151,33 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
|
|||
tVariant* pVal = exception_calloc(1, sizeof(tVariant));
|
||||
right->pVal = pVal;
|
||||
pVal->nType = TSDB_DATA_TYPE_ARRAY;
|
||||
pVal->arr = taosArrayInit(2, sizeof(char*));
|
||||
pVal->arr = taosArrayInit(2, POINTER_BYTES);
|
||||
|
||||
const char* cond = tbnameCond + QUERY_COND_REL_PREFIX_IN_LEN;
|
||||
for (const char *e = cond; *e != 0; e++) {
|
||||
if (*e == TS_PATH_DELIMITER[0]) {
|
||||
cond = e + 1;
|
||||
} else if (*e == ',') {
|
||||
size_t len = e - cond + 1;
|
||||
size_t len = e - cond + VARSTR_HEADER_SIZE;
|
||||
char* p = exception_malloc(len);
|
||||
memcpy(p, cond, len);
|
||||
p[len - 1] = 0;
|
||||
varDataSetLen(p, len - VARSTR_HEADER_SIZE);
|
||||
memcpy(varDataVal(p), cond, len);
|
||||
cond += len;
|
||||
taosArrayPush(pVal->arr, &p);
|
||||
}
|
||||
}
|
||||
|
||||
if (*cond != 0) {
|
||||
char* p = exception_strdup( cond );
|
||||
size_t len = strlen(cond) + VARSTR_HEADER_SIZE;
|
||||
|
||||
char* p = exception_malloc(len);
|
||||
varDataSetLen(p, len - VARSTR_HEADER_SIZE);
|
||||
memcpy(varDataVal(p), cond, len);
|
||||
|
||||
taosArrayPush(pVal->arr, &p);
|
||||
}
|
||||
|
||||
taosArraySortString(pVal->arr);
|
||||
taosArraySortString(pVal->arr, taosArrayCompareString);
|
||||
}
|
||||
|
||||
CLEANUP_EXECUTE_TO(anchor, false);
|
||||
|
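The tbname IN handling above now stores each name as a var-format string instead of a plain strdup()'d C string: a VARSTR_HEADER_SIZE length header written with varDataSetLen(), followed by the payload located by varDataVal(). A sketch of that layout, assuming the project's macros are in scope; the makeVarString() helper itself is illustrative and not part of the change.

static char *makeVarString(const char *src, size_t n) {
  char *p = malloc(n + VARSTR_HEADER_SIZE);   // length header + payload
  if (p == NULL) return NULL;
  varDataSetLen(p, n);                        // record the payload length in the header
  memcpy(varDataVal(p), src, n);              // payload starts right after the header
  return p;
}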
|
|
@ -34,7 +34,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, ch
|
|||
* here we revised the start time of day according to the local time zone,
|
||||
* but in case of DST, the start time of one day needs to be dynamically decided.
|
||||
*
|
||||
* TODO dynamically decide the start time of a day
|
||||
* TODO dynamically decide the start time of a day, move to common module
|
||||
*/
|
||||
|
||||
// todo refactor to extract function that is available for Linux/Windows/Mac platform
|
||||
|
@ -117,9 +117,8 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
|
|||
}
|
||||
|
||||
pFillInfo->rowIdx = 0;
|
||||
pFillInfo->numOfRows = numOfRows;
|
||||
|
||||
pFillInfo->endKey = endKey;
|
||||
pFillInfo->numOfRows = numOfRows;
|
||||
}
|
||||
|
||||
void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput) {
|
||||
|
@ -131,6 +130,8 @@ void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput)
|
|||
|
||||
void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput) {
|
||||
assert(pFillInfo->numOfRows == pInput->num);
|
||||
int32_t t = 0;
|
||||
|
||||
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
|
||||
|
@ -138,7 +139,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
|
|||
memcpy(pFillInfo->pData[i], s, pInput->num * pCol->col.bytes);
|
||||
|
||||
if (pCol->flag == TSDB_COL_TAG) { // copy the tag value
|
||||
memcpy(pFillInfo->pTags[i], pFillInfo->pData[i], pCol->col.bytes);
|
||||
memcpy(pFillInfo->pTags[t++], pFillInfo->pData[i], pCol->col.bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -170,7 +171,7 @@ static int32_t taosGetTotalNumOfFilledRes(SFillInfo* pFillInfo, const TSKEY* tsA
|
|||
}
|
||||
}
|
||||
|
||||
int32_t taosGetNumOfResultWithFill(SFillInfo* pFillInfo, int32_t numOfRows, int64_t ekey, int32_t maxNumOfRows) {
|
||||
int64_t taosGetNumOfResultWithFill(SFillInfo* pFillInfo, int32_t numOfRows, int64_t ekey, int32_t maxNumOfRows) {
|
||||
int32_t numOfRes = taosGetTotalNumOfFilledRes(pFillInfo, (int64_t*) pFillInfo->pData[0], numOfRows,
|
||||
pFillInfo->slidingTime, ekey);
|
||||
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
|
||||
|
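/*
 * A minimal sketch of what taosGetNumOfResultWithFill() above computes: the
 * number of rows fill can emit is one per sliding-time step between the first
 * pending timestamp and the end key, capped by the output capacity. This is an
 * illustrative model under those assumptions (ascending order, integer keys),
 * not the actual taosGetTotalNumOfFilledRes() logic.
 */
#include <stdint.h>

static int64_t sketchNumOfFilledRows(int64_t firstKey, int64_t ekey, int64_t slidingTime, int64_t maxRows) {
  if (ekey < firstKey || slidingTime <= 0) return 0;
  int64_t n = (ekey - firstKey) / slidingTime + 1;  // one row per sliding step, inclusive of the first key
  return (n > maxRows) ? maxRows : n;               // never exceed the output buffer capacity
}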
@@ -193,7 +194,7 @@ static double linearInterpolationImpl(double v1, double v2, double k1, double k2
int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoint* point) {
  switch (type) {
    case TSDB_DATA_TYPE_INT: {
      *(int32_t*)point->val = linearInterpolationImpl(*(int32_t*)point1->val, *(int32_t*)point2->val, point1->key,
      *(int32_t*)point->val = (int32_t) linearInterpolationImpl(*(int32_t*)point1->val, *(int32_t*)point2->val, point1->key,
                                                      point2->key, point->key);
      break;
    }

@@ -209,17 +210,17 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
    };
    case TSDB_DATA_TYPE_TIMESTAMP:
    case TSDB_DATA_TYPE_BIGINT: {
      *(int64_t*)point->val = linearInterpolationImpl(*(int64_t*)point1->val, *(int64_t*)point2->val, point1->key,
      *(int64_t*)point->val = (int64_t) linearInterpolationImpl(*(int64_t*)point1->val, *(int64_t*)point2->val, point1->key,
                                                      point2->key, point->key);
      break;
    };
    case TSDB_DATA_TYPE_SMALLINT: {
      *(int16_t*)point->val = linearInterpolationImpl(*(int16_t*)point1->val, *(int16_t*)point2->val, point1->key,
      *(int16_t*)point->val = (int16_t) linearInterpolationImpl(*(int16_t*)point1->val, *(int16_t*)point2->val, point1->key,
                                                      point2->key, point->key);
      break;
    };
    case TSDB_DATA_TYPE_TINYINT: {
      *(int8_t*)point->val =
      *(int8_t*) point->val = (int8_t)
          linearInterpolationImpl(*(int8_t*)point1->val, *(int8_t*)point2->val, point1->key, point2->key, point->key);
      break;
    };
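/*
 * A minimal sketch of the math every case above delegates to: two-point linear
 * interpolation evaluated in double and then cast back to the column type.
 * sketchLinearInterp is a hypothetical stand-in for linearInterpolationImpl,
 * assuming the usual formula v = v1 + (v2 - v1) * (k - k1) / (k2 - k1).
 */
static double sketchLinearInterp(double v1, double v2, double k1, double k2, double k) {
  if (k2 == k1) return v1;                       // degenerate segment: fall back to the left value
  return v1 + (v2 - v1) * (k - k1) / (k2 - k1);  // value at key k on the segment (k1,v1)-(k2,v2)
}

/* Example: keys 1000 and 2000 with values 10 and 30 give 20 at key 1500. */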
@@ -243,8 +244,8 @@ static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, char** pTags, in

static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData,
                                int64_t ts, char** pTags, bool outOfBound) {
  char** prevValues = &pFillInfo->prevValues;
  char** nextValues = &pFillInfo->nextValues;
  char* prevValues = pFillInfo->prevValues;
  char* nextValues = pFillInfo->nextValues;

  SPoint point1, point2, point;

@@ -257,16 +258,21 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t*

  // set the other values
  if (pFillInfo->fillType == TSDB_FILL_PREV) {
    char* pInterpolationData = FILL_IS_ASC_FILL(pFillInfo) ? *prevValues : *nextValues;
    if (pInterpolationData != NULL) {
    char* p = FILL_IS_ASC_FILL(pFillInfo) ? prevValues : nextValues;

    if (p != NULL) {
      for (int32_t i = 1; i < numOfValCols; ++i) {
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];

        char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num);
        if (isNull(pInterpolationData + pCol->col.offset, pCol->col.type)) {
          setNull(val1, pCol->col.type, pCol->col.bytes);
        if (isNull(p + pCol->col.offset, pCol->col.type)) {
          if (pCol->col.type == TSDB_DATA_TYPE_BINARY || pCol->col.type == TSDB_DATA_TYPE_NCHAR) {
            setVardataNull(val1, pCol->col.type);
          } else {
          assignVal(val1, pInterpolationData + pCol->col.offset, pCol->col.bytes, pCol->col.type);
            setNull(val1, pCol->col.type, pCol->col.bytes);
          }
        } else {
          assignVal(val1, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
        }
      }
    } else { // no prev value yet, set the value for NULL

@@ -274,14 +280,18 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t*
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];

        char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num);
        if (pCol->col.type == TSDB_DATA_TYPE_BINARY||pCol->col.type == TSDB_DATA_TYPE_NCHAR) {
          setVardataNull(val1, pCol->col.type);
        } else {
          setNull(val1, pCol->col.type, pCol->col.bytes);
        }
      }
    }

    setTagsValue(pFillInfo, data, pTags, numOfValCols, *num);
  } else if (pFillInfo->fillType == TSDB_FILL_LINEAR) {
    // TODO : linear interpolation supports NULL value
    if (*prevValues != NULL && !outOfBound) {
    if (prevValues != NULL && !outOfBound) {
      for (int32_t i = 1; i < numOfValCols; ++i) {
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];

@@ -289,12 +299,15 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t*
        int16_t bytes = pCol->col.bytes;

        char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num);
        if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) {
        if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR) {
          setVardataNull(val1, pCol->col.type);
          continue;
        } else if (type == TSDB_DATA_TYPE_BOOL) {
          setNull(val1, pCol->col.type, bytes);
          continue;
        }

        point1 = (SPoint){.key = *(TSKEY*)(*prevValues), .val = *prevValues + pCol->col.offset};
        point1 = (SPoint){.key = *(TSKEY*)(prevValues), .val = prevValues + pCol->col.offset};
        point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->rowIdx * bytes};
        point = (SPoint){.key = pFillInfo->start, .val = val1};
        taosDoLinearInterpolation(type, &point1, &point2, &point);
@@ -307,8 +320,13 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t*
        SFillColInfo* pCol = &pFillInfo->pFillCol[i];

        char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num);

        if (pCol->col.type == TSDB_DATA_TYPE_BINARY || pCol->col.type == TSDB_DATA_TYPE_NCHAR) {
          setVardataNull(val1, pCol->col.type);
        } else {
          setNull(val1, pCol->col.type, pCol->col.bytes);
        }
      }

      setTagsValue(pFillInfo, data, pTags, numOfValCols, *num);

@@ -318,7 +336,7 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t*
      SFillColInfo* pCol = &pFillInfo->pFillCol[i];

      char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num);
      assignVal(val1, (char*)&pCol->defaultVal.i, pCol->col.bytes, pCol->col.type);
      assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
    }

    setTagsValue(pFillInfo, data, pTags, numOfValCols, *num);
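/*
 * A compact sketch of the three fill strategies the branches above implement
 * for one missing row: carry the previous value (PREV), interpolate between
 * the previous and next points (LINEAR), or write the user-supplied constant
 * (VALUE, i.e. fillVal). The enum and helper below are hypothetical and model
 * only a single int64 column; the real code dispatches per column type and
 * handles NULL/var-length columns separately.
 */
#include <stdint.h>
#include <stdbool.h>

typedef enum { SKETCH_FILL_PREV, SKETCH_FILL_LINEAR, SKETCH_FILL_VALUE } SketchFillType;

static int64_t sketchFillOne(SketchFillType type, bool hasPrev, int64_t prevVal, int64_t prevKey,
                             int64_t nextVal, int64_t nextKey, int64_t key, int64_t userVal) {
  switch (type) {
    case SKETCH_FILL_PREV:                  // carry the last seen value forward
      return hasPrev ? prevVal : 0;         // the real code writes NULL when no prev row exists yet
    case SKETCH_FILL_LINEAR:                // interpolate between the prev and next points
      if (!hasPrev || nextKey == prevKey) return 0;
      return prevVal + (nextVal - prevVal) * (key - prevKey) / (nextKey - prevKey);
    default:                                // SKETCH_FILL_VALUE: user-supplied constant (fillVal)
      return userVal;
  }
}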
@@ -338,11 +356,16 @@ static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** nextValues) {
  *nextValues = calloc(1, pFillInfo->rowSize);
  for (int i = 1; i < pFillInfo->numOfCols; i++) {
    SFillColInfo* pCol = &pFillInfo->pFillCol[i];

    if (pCol->col.type == TSDB_DATA_TYPE_BINARY||pCol->col.type == TSDB_DATA_TYPE_NCHAR) {
      setVardataNull(*nextValues + pCol->col.offset, pCol->col.type);
    } else {
      setNull(*nextValues + pCol->col.offset, pCol->col.type, pCol->col.bytes);
    }
  }
}

int32_t taosDoInterpoResult(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfRows, int32_t outputRows, char** srcData) {
int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfRows, int32_t outputRows, char** srcData) {
  int32_t num = 0;
  pFillInfo->numOfCurrent = 0;

@@ -356,8 +379,8 @@ int32_t taosDoInterpoResult(SFillInfo* pFillInfo, tFilePage** data, int32_t numO

  if (numOfRows == 0) {
    /*
     * we need to rebuild whole result set
     * NOTE:we need to keep the last saved data, to generated the filled data
     * These data are generated according to fill strategy, since the current timestamp is out of time window of
     * real result set. Note that we need to keep the direct previous result rows, to generated the filled data.
     */
    while (num < outputRows) {
      doInterpoResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, pTags, true);

@@ -387,7 +410,7 @@ int32_t taosDoInterpoResult(SFillInfo* pFillInfo, tFilePage** data, int32_t numO

    while (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) ||
            (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) {
      doInterpoResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, pTags, false);
      doInterpoResultImpl(pFillInfo, data, &num, srcData, ts, pTags, false);
    }

    /* output buffer is full, abort */

@@ -420,7 +443,7 @@ int32_t taosDoInterpoResult(SFillInfo* pFillInfo, tFilePage** data, int32_t numO
        assignVal(val1, src, pCol->col.bytes, pCol->col.type);
        memcpy(*prevValues + pCol->col.offset, src, pCol->col.bytes);
      } else {
        assignVal(val1, (char*) &pCol->defaultVal.i, pCol->col.bytes, pCol->col.type);
        assignVal(val1, (char*) &pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
      }
    }
  }

@@ -450,21 +473,12 @@ int32_t taosDoInterpoResult(SFillInfo* pFillInfo, tFilePage** data, int32_t numO
  }
}

void taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int64_t* outputRows, int32_t capacity) {
int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) {
  int32_t remain = taosNumOfRemainRows(pFillInfo); // todo use iterator?

  // TSKEY ekey = taosGetRevisedEndKey(pQuery->window.ekey, pQuery->order.order, pQuery->slidingTime,
  // pQuery->slidingTimeUnit, pQuery->precision);
  // if (QUERY_IS_ASC_QUERY(pQuery)) {
  // assert(ekey >= pQuery->window.ekey);
  // } else {
  // assert(ekey <= pQuery->window.ekey);
  // }

  int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, pFillInfo->endKey, capacity);

  int32_t numOfRes = taosDoInterpoResult(pFillInfo, output, remain, rows, pFillInfo->pData);
  *outputRows = rows;

  int32_t numOfRes = generateDataBlockImpl(pFillInfo, output, remain, rows, pFillInfo->pData);
  assert(numOfRes == rows);

  return numOfRes;
}
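/*
 * A sketch of how a caller adapts to the new taosGenerateDataBlock() signature:
 * the produced row count now comes back as the return value instead of through
 * the old outputRows pointer. The surrounding names are hypothetical.
 *
 *   int64_t rows = taosGenerateDataBlock(pFillInfo, pOutputPages, capacity);
 *   pQuery->rec.rows += rows;   // previously: taosGenerateDataBlock(pFillInfo, pOutputPages, &rows, capacity);
 */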
@@ -101,11 +101,12 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
      break;
    }
    case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length
      pVar->nLen = len / TSDB_NCHAR_SIZE;
      pVar->wpz = calloc(1, (pVar->nLen + 1) * TSDB_NCHAR_SIZE);
      int32_t lenInwchar = len / TSDB_NCHAR_SIZE;
      pVar->wpz = calloc(1, (lenInwchar + 1) * TSDB_NCHAR_SIZE);

      wcsncpy(pVar->wpz, (wchar_t *)pz, pVar->nLen);
      pVar->wpz[pVar->nLen] = 0;
      wcsncpy(pVar->wpz, (wchar_t *)pz, lenInwchar);
      pVar->wpz[lenInwchar] = 0;
      pVar->nLen = len;

      break;
    }
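/*
 * A minimal sketch of the unit fix above: the wide-character count stays in a
 * local while pVar->nLen ends up holding the raw byte length, so the two units
 * are no longer mixed. sketchCopyNchar is a hypothetical helper assuming the
 * payload length is an exact multiple of sizeof(wchar_t), as TSDB_NCHAR_SIZE implies.
 */
#include <wchar.h>
#include <stdlib.h>
#include <string.h>

static wchar_t* sketchCopyNchar(const char* raw, size_t rawBytes, size_t* byteLenOut) {
  size_t lenInWchar = rawBytes / sizeof(wchar_t);        // number of wide characters in the payload
  wchar_t* w = calloc(lenInWchar + 1, sizeof(wchar_t));  // +1 for the terminating L'\0'
  if (w == NULL) return NULL;
  memcpy(w, raw, lenInWchar * sizeof(wchar_t));
  w[lenInWchar] = 0;
  *byteLenOut = rawBytes;                                // report the length in bytes, like pVar->nLen
  return w;
}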
@@ -74,7 +74,7 @@ void tsdbCloseMetaFile(SMetaFile *mfh);
typedef struct {
  TSKEY   keyFirst;
  TSKEY   keyLast;
  int32_t numOfPoints;
  int32_t numOfRows;
  void *  pData;
} SMemTable;

@@ -173,7 +173,7 @@ typedef struct {
typedef struct {
  TSKEY   keyFirst;
  TSKEY   keyLast;
  int64_t numOfPoints;
  int64_t numOfRows;
  SList * list;
} SCacheMem;

@@ -294,7 +294,7 @@ typedef struct {
  int64_t last : 1;          // If the block in data file or last file
  int64_t offset : 63;       // Offset of data block or sub-block index depending on numOfSubBlocks
  int32_t algorithm : 8;     // Compression algorithm
  int32_t numOfPoints : 24;  // Number of total points
  int32_t numOfRows : 24;    // Number of total points
  int32_t sversion;          // Schema version
  int32_t len;               // Data block length or nothing
  int16_t numOfSubBlocks;    // Number of sub-blocks;
@ -82,7 +82,7 @@ void *tsdbAllocFromCache(STsdbCache *pCache, int bytes, TSKEY key) {
|
|||
memset(ptr, 0, bytes);
|
||||
if (key < pCache->mem->keyFirst) pCache->mem->keyFirst = key;
|
||||
if (key > pCache->mem->keyLast) pCache->mem->keyLast = key;
|
||||
pCache->mem->numOfPoints++;
|
||||
pCache->mem->numOfRows++;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
@ -127,7 +127,7 @@ static int tsdbAllocBlockFromPool(STsdbCache *pCache) {
|
|||
if (pCache->mem == NULL) return -1;
|
||||
pCache->mem->keyFirst = INT64_MAX;
|
||||
pCache->mem->keyLast = 0;
|
||||
pCache->mem->numOfPoints = 0;
|
||||
pCache->mem->numOfRows = 0;
|
||||
pCache->mem->list = tdListNew(sizeof(STsdbCacheBlock *));
|
||||
}
|
||||
|
||||
|
|
|
@ -233,10 +233,10 @@ SFileGroup *tsdbGetFileGroupNext(SFileGroupIter *pIter) {
|
|||
// SCompBlock *pBlock = pStartBlock;
|
||||
// for (int i = 0; i < numOfBlocks; i++) {
|
||||
// if (tsdbLoadCompCols(pFile, pBlock, (void *)pCompData) < 0) return -1;
|
||||
// pCols->numOfPoints += (pCompData->cols[0].len / 8);
|
||||
// pCols->numOfRows += (pCompData->cols[0].len / 8);
|
||||
// for (int iCol = 0; iCol < pBlock->numOfCols; iCol++) {
|
||||
// SCompCol *pCompCol = &(pCompData->cols[iCol]);
|
||||
// // pCols->numOfPoints += pBlock->numOfPoints;
|
||||
// // pCols->numOfRows += pBlock->numOfRows;
|
||||
// int k = 0;
|
||||
// for (; k < pCols->numOfCols; k++) {
|
||||
// if (pCompCol->colId == pCols->cols[k].colId) break;
|
||||
|
|
|
@ -830,7 +830,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable
|
|||
tSkipListNewNodeInfo(pTable->mem->pData, &level, &headSize);
|
||||
|
||||
TSKEY key = dataRowKey(row);
|
||||
// printf("insert:%lld, size:%d\n", key, pTable->mem->numOfPoints);
|
||||
// printf("insert:%lld, size:%d\n", key, pTable->mem->numOfRows);
|
||||
|
||||
// Copy row into the memory
|
||||
SSkipListNode *pNode = tsdbAllocFromCache(pRepo->tsdbCache, headSize + dataRowLen(row), key);
|
||||
|
@ -854,7 +854,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable
|
|||
if (key < pTable->mem->keyFirst) pTable->mem->keyFirst = key;
|
||||
if (key > pTable->lastKey) pTable->lastKey = key;
|
||||
|
||||
pTable->mem->numOfPoints = tSkipListGetSize(pTable->mem->pData);
|
||||
pTable->mem->numOfRows = tSkipListGetSize(pTable->mem->pData);
|
||||
|
||||
tsdbTrace("vgId:%d, tid:%d, uid:%" PRId64 ", table:%s a row is inserted to table! key:%" PRId64, pRepo->config.tsdbId,
|
||||
pTable->tableId.tid, pTable->tableId.uid, varDataVal(pTable->name), dataRowKey(row));
|
||||
|
@ -1063,7 +1063,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SSkipListIterator **iters
|
|||
while (true) {
|
||||
int rowsRead = tsdbReadRowsFromCache(pIter, maxKey, maxRowsToRead, pDataCols);
|
||||
assert(rowsRead >= 0);
|
||||
if (pDataCols->numOfPoints == 0) break;
|
||||
if (pDataCols->numOfRows == 0) break;
|
||||
nLoop++;
|
||||
|
||||
ASSERT(dataColsKeyFirst(pDataCols) >= minKey && dataColsKeyFirst(pDataCols) <= maxKey);
|
||||
|
@ -1072,13 +1072,13 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SSkipListIterator **iters
|
|||
int rowsWritten = tsdbWriteDataBlock(pHelper, pDataCols);
|
||||
ASSERT(rowsWritten != 0);
|
||||
if (rowsWritten < 0) goto _err;
|
||||
ASSERT(rowsWritten <= pDataCols->numOfPoints);
|
||||
ASSERT(rowsWritten <= pDataCols->numOfRows);
|
||||
|
||||
tdPopDataColsPoints(pDataCols, rowsWritten);
|
||||
maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5 - pDataCols->numOfPoints;
|
||||
maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5 - pDataCols->numOfRows;
|
||||
}
|
||||
|
||||
ASSERT(pDataCols->numOfPoints == 0);
|
||||
ASSERT(pDataCols->numOfRows == 0);
|
||||
|
||||
// Move the last block to the new .l file if neccessary
|
||||
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
|
||||
|
@@ -1196,7 +1196,7 @@ uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, int32_t *
  // Map index to the file name
  int fid = (*index) / 3;

  if (fid > pFileH->numOfFGroups) {
  if (fid >= pFileH->numOfFGroups) {
    // return meta data file
    if ((*index) % 3 > 0) { // it is finished
      tfree(spath);
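/*
 * A minimal sketch of the boundary condition fixed above: each file group
 * contributes three files, so *index / 3 selects the group, and fid equal to
 * numOfFGroups means every data file group is exhausted and the meta file
 * should be reported next; the old `>` test skipped that boundary. The helper
 * below is a hypothetical model of the corrected mapping.
 */
#include <stdbool.h>

static bool sketchIsMetaFileIndex(int index, int numOfFGroups) {
  int fid = index / 3;         // group id derived from the running file index
  return fid >= numOfFGroups;  // past the last group -> hand out the meta file
}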
|
@ -307,7 +307,7 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
|
|||
*/
|
||||
int tsdbWriteDataBlock(SRWHelper *pHelper, SDataCols *pDataCols) {
|
||||
ASSERT(TSDB_HELPER_TYPE(pHelper) == TSDB_WRITE_HELPER);
|
||||
ASSERT(pDataCols->numOfPoints > 0);
|
||||
ASSERT(pDataCols->numOfRows > 0);
|
||||
|
||||
SCompBlock compBlock;
|
||||
int rowsToWrite = 0;
|
||||
|
@ -322,7 +322,7 @@ int tsdbWriteDataBlock(SRWHelper *pHelper, SDataCols *pDataCols) {
|
|||
|
||||
if (pIdx->offset == 0 || (!pIdx->hasLast && keyFirst > pIdx->maxKey)) { // Just append as a super block
|
||||
ASSERT(pHelper->hasOldLastBlock == false);
|
||||
rowsToWrite = pDataCols->numOfPoints;
|
||||
rowsToWrite = pDataCols->numOfRows;
|
||||
SFile *pWFile = NULL;
|
||||
bool isLast = false;
|
||||
|
||||
|
@ -380,10 +380,10 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) {
|
|||
|
||||
if (pCompBlock->numOfSubBlocks > 1) {
|
||||
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1), NULL) < 0) return -1;
|
||||
ASSERT(pHelper->pDataCols[0]->numOfPoints > 0 &&
|
||||
pHelper->pDataCols[0]->numOfPoints < pHelper->config.minRowsPerFileBlock);
|
||||
ASSERT(pHelper->pDataCols[0]->numOfRows > 0 &&
|
||||
pHelper->pDataCols[0]->numOfRows < pHelper->config.minRowsPerFileBlock);
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.nLastF), pHelper->pDataCols[0],
|
||||
pHelper->pDataCols[0]->numOfPoints, &compBlock, true, true) < 0)
|
||||
pHelper->pDataCols[0]->numOfRows, &compBlock, true, true) < 0)
|
||||
return -1;
|
||||
|
||||
if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1;
|
||||
|
@ -625,13 +625,13 @@ int tsdbLoadBlockDataCols(SRWHelper *pHelper, SDataCols *pDataCols, int blkIdx,
|
|||
for (int i = 1; i < numOfSubBlocks; i++) {
|
||||
pStartBlock++;
|
||||
if (tsdbLoadSingleBlockDataCols(pHelper, pStartBlock, colIds, numOfColIds, pHelper->pDataCols[1]) < 0) return -1;
|
||||
tdMergeDataCols(pDataCols, pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfPoints);
|
||||
tdMergeDataCols(pDataCols, pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32_t len, int8_t comp, int numOfPoints,
|
||||
static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32_t len, int8_t comp, int numOfRows,
|
||||
int maxPoints, char *buffer, int bufferSize) {
|
||||
// Verify by checksum
|
||||
if (!taosCheckChecksumWhole((uint8_t *)content, len)) return -1;
|
||||
|
@ -640,16 +640,16 @@ static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32
|
|||
if (comp) {
|
||||
// // Need to decompress
|
||||
pDataCol->len = (*(tDataTypeDesc[pDataCol->type].decompFunc))(
|
||||
content, len - sizeof(TSCKSUM), numOfPoints, pDataCol->pData, pDataCol->spaceSize, comp, buffer, bufferSize);
|
||||
content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData, pDataCol->spaceSize, comp, buffer, bufferSize);
|
||||
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
dataColSetOffset(pDataCol, numOfPoints);
|
||||
dataColSetOffset(pDataCol, numOfRows);
|
||||
}
|
||||
} else {
|
||||
// No need to decompress, just memcpy it
|
||||
pDataCol->len = len - sizeof(TSCKSUM);
|
||||
memcpy(pDataCol->pData, content, pDataCol->len);
|
||||
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
dataColSetOffset(pDataCol, numOfPoints);
|
||||
dataColSetOffset(pDataCol, numOfRows);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -673,7 +673,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
|
|||
int32_t tsize = sizeof(SCompData) + sizeof(SCompCol) * pCompBlock->numOfCols + sizeof(TSCKSUM);
|
||||
if (!taosCheckChecksumWhole((uint8_t *)pCompData, tsize)) goto _err;
|
||||
|
||||
pDataCols->numOfPoints = pCompBlock->numOfPoints;
|
||||
pDataCols->numOfRows = pCompBlock->numOfRows;
|
||||
|
||||
// Recover the data
|
||||
int ccol = 0;
|
||||
|
@ -682,7 +682,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
|
|||
SDataCol *pDataCol = &(pDataCols->cols[dcol]);
|
||||
if (ccol >= pCompData->numOfCols) {
|
||||
// Set current column as NULL and forward
|
||||
dataColSetNEleNull(pDataCol, pCompBlock->numOfPoints, pDataCols->maxPoints);
|
||||
dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
|
||||
dcol++;
|
||||
continue;
|
||||
}
|
||||
|
@ -691,15 +691,15 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
|
|||
|
||||
if (pCompCol->colId == pDataCol->colId) {
|
||||
if (pCompBlock->algorithm == TWO_STAGE_COMP) {
|
||||
int zsize = pDataCol->bytes * pCompBlock->numOfPoints + COMP_OVERFLOW_BYTES;
|
||||
int zsize = pDataCol->bytes * pCompBlock->numOfRows + COMP_OVERFLOW_BYTES;
|
||||
if (pCompCol->type == TSDB_DATA_TYPE_BINARY || pCompCol->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
zsize += (sizeof(VarDataLenT) * pCompBlock->numOfPoints);
|
||||
zsize += (sizeof(VarDataLenT) * pCompBlock->numOfRows);
|
||||
}
|
||||
pHelper->compBuffer = trealloc(pHelper->compBuffer, zsize);
|
||||
if (pHelper->compBuffer == NULL) goto _err;
|
||||
}
|
||||
if (tsdbCheckAndDecodeColumnData(pDataCol, (char *)pCompData + tsize + pCompCol->offset, pCompCol->len,
|
||||
pCompBlock->algorithm, pCompBlock->numOfPoints, pDataCols->maxPoints,
|
||||
pCompBlock->algorithm, pCompBlock->numOfRows, pDataCols->maxPoints,
|
||||
pHelper->compBuffer, tsizeof(pHelper->compBuffer)) < 0)
|
||||
goto _err;
|
||||
dcol++;
|
||||
|
@ -708,7 +708,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
|
|||
ccol++;
|
||||
} else {
|
||||
// Set current column as NULL and forward
|
||||
dataColSetNEleNull(pDataCol, pCompBlock->numOfPoints, pDataCols->maxPoints);
|
||||
dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
|
||||
dcol++;
|
||||
}
|
||||
}
|
||||
|
@ -732,7 +732,7 @@ int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *tar
|
|||
tdResetDataCols(pHelper->pDataCols[1]);
|
||||
pCompBlock++;
|
||||
if (tsdbLoadBlockDataImpl(pHelper, pCompBlock, pHelper->pDataCols[1]) < 0) goto _err;
|
||||
if (tdMergeDataCols(pHelper->pDataCols[0], pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfPoints) < 0) goto _err;
|
||||
if (tdMergeDataCols(pHelper->pDataCols[0], pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows) < 0) goto _err;
|
||||
}
|
||||
|
||||
// if (target) TODO
|
||||
|
@ -753,7 +753,7 @@ static bool tsdbShouldCreateNewLast(SRWHelper *pHelper) {
|
|||
|
||||
static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDataCols, int rowsToWrite, SCompBlock *pCompBlock,
|
||||
bool isLast, bool isSuperBlock) {
|
||||
ASSERT(rowsToWrite > 0 && rowsToWrite <= pDataCols->numOfPoints &&
|
||||
ASSERT(rowsToWrite > 0 && rowsToWrite <= pDataCols->numOfRows &&
|
||||
rowsToWrite <= pHelper->config.maxRowsPerFileBlock);
|
||||
|
||||
SCompData *pCompData = (SCompData *)(pHelper->pBuffer);
|
||||
|
@ -840,7 +840,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
|
|||
pCompBlock->last = isLast;
|
||||
pCompBlock->offset = offset;
|
||||
pCompBlock->algorithm = pHelper->config.compress;
|
||||
pCompBlock->numOfPoints = rowsToWrite;
|
||||
pCompBlock->numOfRows = rowsToWrite;
|
||||
pCompBlock->sversion = pHelper->tableInfo.sversion;
|
||||
pCompBlock->len = (int32_t)lsize;
|
||||
pCompBlock->numOfSubBlocks = isSuperBlock ? 1 : 0;
|
||||
|
@ -877,7 +877,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
int rowsWritten = 0;
|
||||
SCompBlock compBlock = {0};
|
||||
|
||||
ASSERT(pDataCols->numOfPoints > 0);
|
||||
ASSERT(pDataCols->numOfRows > 0);
|
||||
TSKEY keyFirst = dataColsKeyFirst(pDataCols);
|
||||
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
|
@ -889,32 +889,32 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
// ASSERT(compareKeyBlock((void *)&keyFirst, (void *)pCompBlock) == 0);
|
||||
|
||||
if (keyFirst > blockAtIdx(pHelper, blkIdx)->keyLast) { // Merge with the last block by append
|
||||
ASSERT(blockAtIdx(pHelper, blkIdx)->numOfPoints < pHelper->config.minRowsPerFileBlock && blkIdx == pIdx->numOfBlocks-1);
|
||||
ASSERT(blockAtIdx(pHelper, blkIdx)->numOfRows < pHelper->config.minRowsPerFileBlock && blkIdx == pIdx->numOfBlocks-1);
|
||||
int defaultRowsToWrite = pHelper->config.maxRowsPerFileBlock * 4 / 5; // TODO: make a interface
|
||||
|
||||
rowsWritten = MIN((defaultRowsToWrite - blockAtIdx(pHelper, blkIdx)->numOfPoints), pDataCols->numOfPoints);
|
||||
rowsWritten = MIN((defaultRowsToWrite - blockAtIdx(pHelper, blkIdx)->numOfRows), pDataCols->numOfRows);
|
||||
if ((blockAtIdx(pHelper, blkIdx)->numOfSubBlocks < TSDB_MAX_SUBBLOCKS) &&
|
||||
(blockAtIdx(pHelper, blkIdx)->numOfPoints + rowsWritten < pHelper->config.minRowsPerFileBlock) && (pHelper->files.nLastF.fd) > 0) {
|
||||
(blockAtIdx(pHelper, blkIdx)->numOfRows + rowsWritten < pHelper->config.minRowsPerFileBlock) && (pHelper->files.nLastF.fd) > 0) {
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, rowsWritten, &compBlock, true, false) < 0)
|
||||
goto _err;
|
||||
if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err;
|
||||
} else {
|
||||
// Load
|
||||
if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err;
|
||||
ASSERT(pHelper->pDataCols[0]->numOfPoints == blockAtIdx(pHelper, blkIdx)->numOfPoints);
|
||||
ASSERT(pHelper->pDataCols[0]->numOfRows == blockAtIdx(pHelper, blkIdx)->numOfRows);
|
||||
// Merge
|
||||
if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, rowsWritten) < 0) goto _err;
|
||||
// Write
|
||||
SFile *pWFile = NULL;
|
||||
bool isLast = false;
|
||||
if (pHelper->pDataCols[0]->numOfPoints >= pHelper->config.minRowsPerFileBlock) {
|
||||
if (pHelper->pDataCols[0]->numOfRows >= pHelper->config.minRowsPerFileBlock) {
|
||||
pWFile = &(pHelper->files.dataF);
|
||||
} else {
|
||||
isLast = true;
|
||||
pWFile = (pHelper->files.nLastF.fd > 0) ? &(pHelper->files.nLastF) : &(pHelper->files.lastF);
|
||||
}
|
||||
if (tsdbWriteBlockToFile(pHelper, pWFile, pHelper->pDataCols[0],
|
||||
pHelper->pDataCols[0]->numOfPoints, &compBlock, isLast, true) < 0)
|
||||
pHelper->pDataCols[0]->numOfRows, &compBlock, isLast, true) < 0)
|
||||
goto _err;
|
||||
if (tsdbUpdateSuperBlock(pHelper, &compBlock, blkIdx) < 0) goto _err;
|
||||
}
|
||||
|
@ -931,7 +931,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
// rows1: number of rows must merge in this block
|
||||
int rows1 = tsdbGetRowsInRange(pDataCols, blockAtIdx(pHelper, blkIdx)->keyFirst, blockAtIdx(pHelper, blkIdx)->keyLast);
|
||||
// rows2: max nuber of rows the block can have more
|
||||
int rows2 = pHelper->config.maxRowsPerFileBlock - blockAtIdx(pHelper, blkIdx)->numOfPoints;
|
||||
int rows2 = pHelper->config.maxRowsPerFileBlock - blockAtIdx(pHelper, blkIdx)->numOfRows;
|
||||
// rows3: number of rows between this block and the next block
|
||||
int rows3 = tsdbGetRowsInRange(pDataCols, blockAtIdx(pHelper, blkIdx)->keyFirst, keyLimit);
|
||||
|
||||
|
@ -939,7 +939,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
|
||||
if ((rows2 >= rows1) &&
|
||||
(( blockAtIdx(pHelper, blkIdx)->last) ||
|
||||
((rows1 + blockAtIdx(pHelper, blkIdx)->numOfPoints < pHelper->config.minRowsPerFileBlock) && (pHelper->files.nLastF.fd < 0)))) {
|
||||
((rows1 + blockAtIdx(pHelper, blkIdx)->numOfRows < pHelper->config.minRowsPerFileBlock) && (pHelper->files.nLastF.fd < 0)))) {
|
||||
rowsWritten = rows1;
|
||||
bool isLast = false;
|
||||
SFile *pFile = NULL;
|
||||
|
@ -965,11 +965,11 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
int round = 0;
|
||||
// tdResetDataCols(pHelper->pDataCols[1]);
|
||||
while (true) {
|
||||
if (iter1 >= pHelper->pDataCols[0]->numOfPoints && iter2 >= rows3) break;
|
||||
if (iter1 >= pHelper->pDataCols[0]->numOfRows && iter2 >= rows3) break;
|
||||
tdMergeTwoDataCols(pHelper->pDataCols[1], pHelper->pDataCols[0], &iter1, pDataCols, &iter2, pHelper->config.maxRowsPerFileBlock * 4 / 5);
|
||||
ASSERT(pHelper->pDataCols[1]->numOfPoints > 0);
|
||||
ASSERT(pHelper->pDataCols[1]->numOfRows > 0);
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pHelper->pDataCols[1],
|
||||
pHelper->pDataCols[1]->numOfPoints, &compBlock, false, true) < 0)
|
||||
pHelper->pDataCols[1]->numOfRows, &compBlock, false, true) < 0)
|
||||
goto _err;
|
||||
if (round == 0) {
|
||||
tsdbUpdateSuperBlock(pHelper, &compBlock, blkIdx);
|
||||
|
@ -980,17 +980,17 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
blkIdx++;
|
||||
// TODO: the blkIdx here is not correct
|
||||
|
||||
// if (iter1 >= pHelper->pDataCols[0]->numOfPoints && iter2 >= rows3) {
|
||||
// if (pHelper->pDataCols[1]->numOfPoints > 0) {
|
||||
// if (iter1 >= pHelper->pDataCols[0]->numOfRows && iter2 >= rows3) {
|
||||
// if (pHelper->pDataCols[1]->numOfRows > 0) {
|
||||
// if (tsdbWriteBlockToFile(pHelper, &pHelper->files.dataF, pHelper->pDataCols[1],
|
||||
// pHelper->pDataCols[1]->numOfPoints, &compBlock, false, true) < 0)
|
||||
// pHelper->pDataCols[1]->numOfRows, &compBlock, false, true) < 0)
|
||||
// goto _err;
|
||||
// // TODO: the blkIdx here is not correct
|
||||
// tsdbAddSubBlock(pHelper, &compBlock, blkIdx, pHelper->pDataCols[1]->numOfPoints);
|
||||
// tsdbAddSubBlock(pHelper, &compBlock, blkIdx, pHelper->pDataCols[1]->numOfRows);
|
||||
// }
|
||||
// }
|
||||
|
||||
// TSKEY key1 = iter1 >= pHelper->pDataCols[0]->numOfPoints
|
||||
// TSKEY key1 = iter1 >= pHelper->pDataCols[0]->numOfRows
|
||||
// ? INT64_MAX
|
||||
// : ((int64_t *)(pHelper->pDataCols[0]->cols[0].pData))[iter1];
|
||||
// TSKEY key2 = iter2 >= rowsWritten ? INT64_MAX : ((int64_t *)(pDataCols->cols[0].pData))[iter2];
|
||||
|
@ -998,11 +998,11 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
// if (key1 < key2) {
|
||||
// for (int i = 0; i < pDataCols->numOfCols; i++) {
|
||||
// SDataCol *pDataCol = pHelper->pDataCols[1]->cols + i;
|
||||
// memcpy(((char *)pDataCol->pData + TYPE_BYTES[pDataCol->type] * pHelper->pDataCols[1]->numOfPoints),
|
||||
// memcpy(((char *)pDataCol->pData + TYPE_BYTES[pDataCol->type] * pHelper->pDataCols[1]->numOfRows),
|
||||
// ((char *)pHelper->pDataCols[0]->cols[i].pData + TYPE_BYTES[pDataCol->type] * iter1),
|
||||
// TYPE_BYTES[pDataCol->type]);
|
||||
// }
|
||||
// pHelper->pDataCols[1]->numOfPoints++;
|
||||
// pHelper->pDataCols[1]->numOfRows++;
|
||||
// iter1++;
|
||||
// } else if (key1 == key2) {
|
||||
// // TODO: think about duplicate key cases
|
||||
|
@ -1010,17 +1010,17 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa
|
|||
// } else {
|
||||
// for (int i = 0; i < pDataCols->numOfCols; i++) {
|
||||
// SDataCol *pDataCol = pHelper->pDataCols[1]->cols + i;
|
||||
// memcpy(((char *)pDataCol->pData + TYPE_BYTES[pDataCol->type] * pHelper->pDataCols[1]->numOfPoints),
|
||||
// memcpy(((char *)pDataCol->pData + TYPE_BYTES[pDataCol->type] * pHelper->pDataCols[1]->numOfRows),
|
||||
// ((char *)pDataCols->cols[i].pData +
|
||||
// TYPE_BYTES[pDataCol->type] * iter2),
|
||||
// TYPE_BYTES[pDataCol->type]);
|
||||
// }
|
||||
// pHelper->pDataCols[1]->numOfPoints++;
|
||||
// pHelper->pDataCols[1]->numOfRows++;
|
||||
// iter2++;
|
||||
// }
|
||||
|
||||
// if (pHelper->pDataCols[0]->numOfPoints >= pHelper->config.maxRowsPerFileBlock * 4 / 5) {
|
||||
// if (tsdbWriteBlockToFile(pHelper, &pHelper->files.dataF, pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfPoints, &compBlock, false, true) < 0) goto _err;
|
||||
// if (pHelper->pDataCols[0]->numOfRows >= pHelper->config.maxRowsPerFileBlock * 4 / 5) {
|
||||
// if (tsdbWriteBlockToFile(pHelper, &pHelper->files.dataF, pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows, &compBlock, false, true) < 0) goto _err;
|
||||
// // TODO: blkIdx here is not correct, fix it
|
||||
// tsdbInsertSuperBlock(pHelper, &compBlock, blkIdx);
|
||||
|
||||
|
@ -1133,7 +1133,7 @@ static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkId
|
|||
pSCompBlock->numOfSubBlocks++;
|
||||
ASSERT(pSCompBlock->numOfSubBlocks <= TSDB_MAX_SUBBLOCKS);
|
||||
pSCompBlock->len += sizeof(SCompBlock);
|
||||
pSCompBlock->numOfPoints += rowsAdded;
|
||||
pSCompBlock->numOfRows += rowsAdded;
|
||||
pSCompBlock->keyFirst = MIN(pSCompBlock->keyFirst, pCompBlock->keyFirst);
|
||||
pSCompBlock->keyLast = MAX(pSCompBlock->keyLast, pCompBlock->keyLast);
|
||||
pIdx->len += sizeof(SCompBlock);
|
||||
|
@ -1164,7 +1164,7 @@ static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkId
|
|||
((SCompBlock *)ptr)[1] = *pCompBlock;
|
||||
|
||||
pSCompBlock->numOfSubBlocks = 2;
|
||||
pSCompBlock->numOfPoints += rowsAdded;
|
||||
pSCompBlock->numOfRows += rowsAdded;
|
||||
pSCompBlock->offset = ((char *)ptr) - ((char *)pHelper->pCompInfo);
|
||||
pSCompBlock->len = sizeof(SCompBlock) * 2;
|
||||
pSCompBlock->keyFirst = MIN(((SCompBlock *)ptr)[0].keyFirst, ((SCompBlock *)ptr)[1].keyFirst);
|
||||
|
@ -1219,7 +1219,7 @@ static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int
|
|||
|
||||
// Get the number of rows in range [minKey, maxKey]
|
||||
static int tsdbGetRowsInRange(SDataCols *pDataCols, TSKEY minKey, TSKEY maxKey) {
|
||||
if (pDataCols->numOfPoints == 0) return 0;
|
||||
if (pDataCols->numOfRows == 0) return 0;
|
||||
|
||||
ASSERT(minKey <= maxKey);
|
||||
TSKEY keyFirst = dataColsKeyFirst(pDataCols);
|
||||
|
@ -1228,11 +1228,11 @@ static int tsdbGetRowsInRange(SDataCols *pDataCols, TSKEY minKey, TSKEY maxKey)
|
|||
|
||||
if (minKey > keyLast || maxKey < keyFirst) return 0;
|
||||
|
||||
void *ptr1 = taosbsearch((void *)&minKey, (void *)pDataCols->cols[0].pData, pDataCols->numOfPoints, sizeof(TSKEY),
|
||||
void *ptr1 = taosbsearch((void *)&minKey, (void *)pDataCols->cols[0].pData, pDataCols->numOfRows, sizeof(TSKEY),
|
||||
compTSKEY, TD_GE);
|
||||
ASSERT(ptr1 != NULL);
|
||||
|
||||
void *ptr2 = taosbsearch((void *)&maxKey, (void *)pDataCols->cols[0].pData, pDataCols->numOfPoints, sizeof(TSKEY),
|
||||
void *ptr2 = taosbsearch((void *)&maxKey, (void *)pDataCols->cols[0].pData, pDataCols->numOfRows, sizeof(TSKEY),
|
||||
compTSKEY, TD_LE);
|
||||
ASSERT(ptr2 != NULL);
|
||||
|
||||
|
|
|
@ -40,10 +40,6 @@ enum {
|
|||
TSDB_QUERY_TYPE_EXTERNAL = 3,
|
||||
};
|
||||
|
||||
typedef struct SField {
|
||||
// todo need the definition
|
||||
} SField;
|
||||
|
||||
typedef struct SQueryFilePos {
|
||||
int32_t fid;
|
||||
int32_t slot;
|
||||
|
@ -71,28 +67,20 @@ typedef struct STableCheckInfo {
|
|||
STableId tableId;
|
||||
TSKEY lastKey;
|
||||
STable* pTableObj;
|
||||
int32_t start;
|
||||
SCompInfo* pCompInfo;
|
||||
int32_t compSize;
|
||||
int32_t numOfBlocks; // number of qualified data blocks not the original blocks
|
||||
SDataCols* pDataCols;
|
||||
|
||||
SSkipListIterator* iter; // skip list iterator
|
||||
SSkipListIterator* iiter; // imem iterator
|
||||
|
||||
bool initBuf; // if we should initialize the in-memory skip list iterator
|
||||
bool initBuf; // whether to initialize the in-memory skip list iterator or not
|
||||
SSkipListIterator* iter; // mem buffer skip list iterator
|
||||
SSkipListIterator* iiter; // imem buffer skip list iterator
|
||||
} STableCheckInfo;
|
||||
|
||||
typedef struct {
|
||||
SCompBlock* compBlock;
|
||||
SField* fields;
|
||||
} SCompBlockFields;
|
||||
|
||||
typedef struct STableBlockInfo {
|
||||
SCompBlockFields pBlock;
|
||||
SCompBlock* compBlock;
|
||||
STableCheckInfo* pTableCheckInfo;
|
||||
int32_t blockIndex;
|
||||
int32_t groupIdx; /* number of group is less than the total number of tables */
|
||||
// int32_t blockIndex;
|
||||
// int32_t groupIdx; /* number of group is less than the total number of tables */
|
||||
} STableBlockInfo;
|
||||
|
||||
typedef struct SBlockOrderSupporter {
|
||||
|
@ -105,15 +93,11 @@ typedef struct SBlockOrderSupporter {
|
|||
typedef struct STsdbQueryHandle {
|
||||
STsdbRepo* pTsdb;
|
||||
SQueryFilePos cur; // current position
|
||||
|
||||
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
|
||||
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */
|
||||
|
||||
int16_t order;
|
||||
STimeWindow window; // the primary query time window that applies to all queries
|
||||
SCompBlock* pBlock;
|
||||
SDataStatis* statis; // query level statistics, only one table block statistics info exists at any time
|
||||
int32_t numOfBlocks;
|
||||
SField** pFields;
|
||||
SArray* pColumns; // column list, SColumnInfoData array list
|
||||
bool locateStart;
|
||||
int32_t outputCapacity;
|
||||
|
@ -123,11 +107,13 @@ typedef struct STsdbQueryHandle {
|
|||
bool checkFiles; // check file stage
|
||||
void* qinfo; // query info handle, for debug purpose
|
||||
int32_t type; // query type: retrieve all data blocks, 2. retrieve only last row, 3. retrieve direct prev|next rows
|
||||
STableBlockInfo* pDataBlockInfo;
|
||||
|
||||
SFileGroup* pFileGroup;
|
||||
SFileGroupIter fileIter;
|
||||
SRWHelper rhelper;
|
||||
STableBlockInfo* pDataBlockInfo;
|
||||
|
||||
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
|
||||
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */
|
||||
} STsdbQueryHandle;
|
||||
|
||||
static void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle);
|
||||
|
@ -152,15 +138,35 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
|
|||
pQueryHandle->window = pCond->twindow;
|
||||
pQueryHandle->pTsdb = tsdb;
|
||||
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
|
||||
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
|
||||
|
||||
pQueryHandle->cur.fid = -1;
|
||||
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
|
||||
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
|
||||
pQueryHandle->activeIndex = 0; // current active table index
|
||||
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
|
||||
|
||||
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
|
||||
|
||||
size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
|
||||
assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
|
||||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
int32_t numOfCols = pCond->numOfCols;
|
||||
|
||||
pQueryHandle->statis = calloc(numOfCols, sizeof(SDataStatis));
|
||||
pQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array?
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData colInfo = {{0}, 0};
|
||||
|
||||
colInfo.info = pCond->colList[i];
|
||||
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
|
||||
taosArrayPush(pQueryHandle->pColumns, &colInfo);
|
||||
pQueryHandle->statis[i].colId = colInfo.info.colId;
|
||||
}
|
||||
|
||||
pQueryHandle->pTableCheckInfo = taosArrayInit(groupList->numOfTables, sizeof(STableCheckInfo));
|
||||
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
|
||||
assert(pMeta != NULL);
|
||||
|
||||
for (int32_t i = 0; i < sizeOfGroup; ++i) {
|
||||
SArray* group = *(SArray**) taosArrayGet(groupList->pGroupList, i);
|
||||
|
@ -174,7 +180,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
|
|||
STableCheckInfo info = {
|
||||
.lastKey = pQueryHandle->window.skey,
|
||||
.tableId = *id,
|
||||
.pTableObj = tsdbGetTableByUid(tsdbGetMeta(tsdb), id->uid),
|
||||
.pTableObj = tsdbGetTableByUid(pMeta, id->uid),
|
||||
};
|
||||
|
||||
assert(info.pTableObj != NULL && info.pTableObj->tableId.tid == id->tid);
|
||||
|
@ -182,28 +188,11 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
|
|||
}
|
||||
}
|
||||
|
||||
uTrace("%p total numOfTable:%d in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
|
||||
|
||||
/*
|
||||
* For ascending timestamp order query, query starts from data files. In contrast, buffer will be checked in the first place
|
||||
* in case of descending timestamp order query.
|
||||
*/
|
||||
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
|
||||
pQueryHandle->activeIndex = 0;
|
||||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
int32_t numOfCols = pCond->numOfCols;
|
||||
pQueryHandle->outputCapacity = 4096;
|
||||
|
||||
pQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
|
||||
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
|
||||
SColumnInfoData colInfo = {{0}, 0};
|
||||
|
||||
colInfo.info = pCond->colList[i];
|
||||
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
|
||||
taosArrayPush(pQueryHandle->pColumns, &colInfo);
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
}
|
||||
|
||||
uTrace("%p total numOfTable:%d in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
|
||||
|
||||
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
|
||||
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
|
||||
|
||||
|
@ -447,7 +436,7 @@ static SDataBlockInfo getTrueDataBlockInfo(STableCheckInfo* pCheckInfo, SCompBlo
|
|||
SDataBlockInfo info = {
|
||||
.window = {.skey = pBlock->keyFirst, .ekey = pBlock->keyLast},
|
||||
.numOfCols = pBlock->numOfCols,
|
||||
.rows = pBlock->numOfPoints,
|
||||
.rows = pBlock->numOfRows,
|
||||
.tid = pCheckInfo->tableId.tid,
|
||||
.uid = pCheckInfo->tableId.uid,
|
||||
};
|
||||
|
@ -499,8 +488,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
|
|||
bool blockLoaded = false;
|
||||
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
|
||||
|
||||
if (pCheckInfo->pDataCols == NULL) { // todo: why not the real data?
|
||||
pCheckInfo->pDataCols = tdNewDataCols(pRepo->tsdbMeta->maxRowBytes, pRepo->tsdbMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
|
||||
if (pCheckInfo->pDataCols == NULL) {
|
||||
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
|
||||
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
|
||||
}
|
||||
|
||||
tdInitDataCols(pCheckInfo->pDataCols, tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pCheckInfo->pTableObj));
|
||||
|
@ -522,8 +512,6 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
|
|||
}
|
||||
|
||||
static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){
|
||||
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
|
||||
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
|
||||
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
|
||||
|
@ -592,8 +580,11 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
|
|||
return;
|
||||
}
|
||||
|
||||
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
|
||||
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
|
||||
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa);
|
||||
taosArrayDestroy(sa);
|
||||
|
||||
} else {
|
||||
pQueryHandle->realNumOfRows = binfo.rows;
|
||||
|
||||
|
@ -617,11 +608,11 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
|
|||
}
|
||||
|
||||
SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0];
|
||||
assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfPoints == pBlock->numOfPoints);
|
||||
assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows);
|
||||
|
||||
if (pCheckInfo->lastKey > pBlock->keyFirst) {
|
||||
cur->pos =
|
||||
binarySearchForKey(pTSCol->cols[0].pData, pBlock->numOfPoints, pCheckInfo->lastKey, pQueryHandle->order);
|
||||
binarySearchForKey(pTSCol->cols[0].pData, pBlock->numOfRows, pCheckInfo->lastKey, pQueryHandle->order);
|
||||
} else {
|
||||
cur->pos = 0;
|
||||
}
|
||||
|
@ -639,9 +630,9 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
|
|||
SDataCols* pDataCols = pCheckInfo->pDataCols;
|
||||
if (pCheckInfo->lastKey < pBlock->keyLast) {
|
||||
cur->pos =
|
||||
binarySearchForKey(pDataCols->cols[0].pData, pBlock->numOfPoints, pCheckInfo->lastKey, pQueryHandle->order);
|
||||
binarySearchForKey(pDataCols->cols[0].pData, pBlock->numOfRows, pCheckInfo->lastKey, pQueryHandle->order);
|
||||
} else {
|
||||
cur->pos = pBlock->numOfPoints - 1;
|
||||
cur->pos = pBlock->numOfRows - 1;
|
||||
}
|
||||
|
||||
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa);
|
||||
|
@ -656,7 +647,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
|
|||
|
||||
static int vnodeBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
|
||||
int firstPos, lastPos, midPos = -1;
|
||||
int numOfPoints;
|
||||
int numOfRows;
|
||||
TSKEY* keyList;
|
||||
|
||||
assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
|
||||
|
@ -674,8 +665,8 @@ static int vnodeBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
|
|||
if (key == keyList[firstPos]) return firstPos;
|
||||
if (key < keyList[firstPos]) return firstPos - 1;
|
||||
|
||||
numOfPoints = lastPos - firstPos + 1;
|
||||
midPos = (numOfPoints >> 1) + firstPos;
|
||||
numOfRows = lastPos - firstPos + 1;
|
||||
midPos = (numOfRows >> 1) + firstPos;
|
||||
|
||||
if (key < keyList[midPos]) {
|
||||
lastPos = midPos - 1;
|
||||
|
@ -700,8 +691,8 @@ static int vnodeBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
|
|||
return lastPos;
|
||||
}
|
||||
|
||||
numOfPoints = lastPos - firstPos + 1;
|
||||
midPos = (numOfPoints >> 1) + firstPos;
|
||||
numOfRows = lastPos - firstPos + 1;
|
||||
midPos = (numOfRows >> 1) + firstPos;
|
||||
|
||||
if (key < keyList[midPos]) {
|
||||
lastPos = midPos - 1;
|
||||
|
@ -819,7 +810,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
|
|||
cur->mixBlock = (cur->pos != blockInfo.rows - 1);
|
||||
} else {
|
||||
int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
|
||||
endPos = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfPoints, pQueryHandle->window.ekey, order);
|
||||
endPos = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
|
||||
cur->mixBlock = true;
|
||||
}
|
||||
|
||||
|
@ -913,7 +904,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
|
|||
}
|
||||
|
||||
int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
|
||||
int32_t end = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfPoints, key, order);
|
||||
int32_t end = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
|
||||
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
|
||||
tSkipListIterNext(pCheckInfo->iter);
|
||||
}
|
||||
|
@@ -1011,7 +1002,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*

int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
  int firstPos, lastPos, midPos = -1;
  int numOfPoints;
  int numOfRows;
  TSKEY* keyList;

  if (num <= 0) return -1;

@@ -1027,8 +1018,8 @@ int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
    if (key == keyList[firstPos]) return firstPos;
    if (key < keyList[firstPos]) return firstPos - 1;

    numOfPoints = lastPos - firstPos + 1;
    midPos = (numOfPoints >> 1) + firstPos;
    numOfRows = lastPos - firstPos + 1;
    midPos = (numOfRows >> 1) + firstPos;

    if (key < keyList[midPos]) {
      lastPos = midPos - 1;

@@ -1053,8 +1044,8 @@ int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
      return lastPos;
    }

    numOfPoints = lastPos - firstPos + 1;
    midPos = (numOfPoints >> 1) + firstPos;
    numOfRows = lastPos - firstPos + 1;
    midPos = (numOfRows >> 1) + firstPos;

    if (key < keyList[midPos]) {
      lastPos = midPos - 1;
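/*
 * A standalone sketch of the search contract the ascending branches above
 * suggest: return the slot of the last key not larger than the target, or -1
 * when the target precedes every key. This is an illustrative model under that
 * assumption (int64_t keys, ascending order), not the real binarySearchForKey().
 */
#include <stdint.h>

static int sketchSearchLastLE(const int64_t* keys, int num, int64_t target) {
  int lo = 0, hi = num - 1, pos = -1;
  while (lo <= hi) {
    int mid = lo + ((hi - lo) >> 1);  // midpoint of the remaining range
    if (keys[mid] <= target) {
      pos = mid;                      // candidate answer, keep searching to the right
      lo = mid + 1;
    } else {
      hi = mid - 1;
    }
  }
  return pos;                         // -1 means target < keys[0]
}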
@ -1074,7 +1065,8 @@ static void cleanBlockOrderSupporter(SBlockOrderSupporter* pSupporter, int32_t n
|
|||
tfree(pSupporter->blockIndexArray);
|
||||
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
tfree(pSupporter->pDataBlockInfo[i]);
|
||||
STableBlockInfo* pBlockInfo = pSupporter->pDataBlockInfo[i];
|
||||
tfree(pBlockInfo);
|
||||
}
|
||||
|
||||
tfree(pSupporter->pDataBlockInfo);
|
||||
|
@ -1100,14 +1092,14 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void*
|
|||
STableBlockInfo* pLeftBlockInfoEx = &pSupporter->pDataBlockInfo[leftTableIndex][leftTableBlockIndex];
|
||||
STableBlockInfo* pRightBlockInfoEx = &pSupporter->pDataBlockInfo[rightTableIndex][rightTableBlockIndex];
|
||||
|
||||
// assert(pLeftBlockInfoEx->pBlock.compBlock->offset != pRightBlockInfoEx->pBlock.compBlock->offset);
|
||||
if (pLeftBlockInfoEx->pBlock.compBlock->offset == pRightBlockInfoEx->pBlock.compBlock->offset &&
|
||||
pLeftBlockInfoEx->pBlock.compBlock->last == pRightBlockInfoEx->pBlock.compBlock->last) {
|
||||
// assert(pLeftBlockInfoEx->compBlock->offset != pRightBlockInfoEx->compBlock->offset);
|
||||
if (pLeftBlockInfoEx->compBlock->offset == pRightBlockInfoEx->compBlock->offset &&
|
||||
pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) {
|
||||
// todo add more information
|
||||
uError("error in header file, two block with same offset:%p", pLeftBlockInfoEx->pBlock.compBlock->offset);
|
||||
uError("error in header file, two block with same offset:%p", pLeftBlockInfoEx->compBlock->offset);
|
||||
}
|
||||
|
||||
return pLeftBlockInfoEx->pBlock.compBlock->offset > pRightBlockInfoEx->pBlock.compBlock->offset ? 1 : -1;
|
||||
return pLeftBlockInfoEx->compBlock->offset > pRightBlockInfoEx->compBlock->offset ? 1 : -1;
|
||||
}
|
||||
|
||||
static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numOfBlocks, int32_t* numOfAllocBlocks) {
|
||||
|
@ -1135,6 +1127,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
|
||||
int32_t cnt = 0;
|
||||
int32_t numOfQualTables = 0;
|
||||
|
||||
for (int32_t j = 0; j < numOfTables; ++j) {
|
||||
STableCheckInfo* pTableCheck = (STableCheckInfo*)taosArrayGet(pQueryHandle->pTableCheckInfo, j);
|
||||
if (pTableCheck->numOfBlocks <= 0) {
|
||||
|
@ -1153,14 +1146,12 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
sup.pDataBlockInfo[numOfQualTables] = (STableBlockInfo*)buf;
|
||||
|
||||
for (int32_t k = 0; k < pTableCheck->numOfBlocks; ++k) {
|
||||
STableBlockInfo* pBlockInfoEx = &sup.pDataBlockInfo[numOfQualTables][k];
|
||||
STableBlockInfo* pBlockInfo = &sup.pDataBlockInfo[numOfQualTables][k];
|
||||
|
||||
pBlockInfoEx->pBlock.compBlock = &pBlock[k];
|
||||
pBlockInfoEx->pBlock.fields = NULL;
|
||||
|
||||
pBlockInfoEx->pTableCheckInfo = pTableCheck;
|
||||
// pBlockInfoEx->groupIdx = pTableCheckInfo[j]->groupIdx; // set the group index
|
||||
// pBlockInfoEx->blockIndex = pTableCheckInfo[j]->start + k; // set the block index in original table
|
||||
pBlockInfo->compBlock = &pBlock[k];
|
||||
pBlockInfo->pTableCheckInfo = pTableCheck;
|
||||
// pBlockInfo->groupIdx = pTableCheckInfo[j]->groupIdx; // set the group index
|
||||
// pBlockInfo->blockIndex = pTableCheckInfo[j]->start + k; // set the block index in original table
cnt++;
}

@ -1185,8 +1176,8 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
int32_t pos = pTree->pNode[0].index;
int32_t index = sup.blockIndexArray[pos]++;
STableBlockInfo* pBlocksInfoEx = sup.pDataBlockInfo[pos];
pQueryHandle->pDataBlockInfo[numOfTotal++] = pBlocksInfoEx[index];
STableBlockInfo* pBlocksInfo = sup.pDataBlockInfo[pos];
pQueryHandle->pDataBlockInfo[numOfTotal++] = pBlocksInfo[index];
// set data block index overflow, in order to disable the offset comparator
if (sup.blockIndexArray[pos] >= sup.numOfBlocksPerTable[pos]) {

@ -1199,7 +1190,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
/*
* available when no import exists
* for(int32_t i = 0; i < cnt - 1; ++i) {
* assert((*pDataBlockInfo)[i].pBlock.compBlock->offset < (*pDataBlockInfo)[i+1].pBlock.compBlock->offset);
* assert((*pDataBlockInfo)[i].compBlock->offset < (*pDataBlockInfo)[i+1].compBlock->offset);
* }
*/

@ -1255,7 +1246,7 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
cur->fid = pQueryHandle->pFileGroup->fileId;
STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot];
return loadFileDataBlock(pQueryHandle, pBlockInfo->pBlock.compBlock, pBlockInfo->pTableCheckInfo);
return loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo);
}
static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {

@ -1291,10 +1282,10 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
cur->blockCompleted = false;
STableBlockInfo* pNext = &pQueryHandle->pDataBlockInfo[cur->slot];
return loadFileDataBlock(pQueryHandle, pNext->pBlock.compBlock, pNext->pTableCheckInfo);
return loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo);
}
} else {
handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->pBlock.compBlock, pCheckInfo);
handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
return pQueryHandle->realNumOfRows > 0;
}
}

@ -1484,35 +1475,33 @@ SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
// there are data in file
if (pHandle->cur.fid >= 0) {
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[pHandle->cur.slot];
STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo;
STable* pTable = pBlockInfo->pTableCheckInfo->pTableObj;
STable* pTable = pCheckInfo->pTableObj;
if (pHandle->cur.mixBlock) {
SDataBlockInfo blockInfo = {
.uid = pTable->tableId.uid,
.tid = pTable->tableId.tid,
.rows = pHandle->cur.rows,
.window = pHandle->cur.win,
.numOfCols = QH_GET_NUM_OF_COLS(pHandle),
};
return blockInfo;
} else {
return getTrueDataBlockInfo(pCheckInfo, pBlockInfo->pBlock.compBlock);
}
} else {
STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex);
SQueryFilePos* cur = &pHandle->cur;
STable* pTable = pCheckInfo->pTableObj;
if (pTable->mem != NULL) { // create mem table iterator if it is not created yet
assert(pCheckInfo->iter != NULL);
STimeWindow* win = &pHandle->cur.win;
STimeWindow* win = &cur->win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value
pCheckInfo->lastKey = win->ekey + step;
cur->lastKey = win->ekey + step;
cur->mixBlock = true;
}
if (!ASCENDING_TRAVERSE(pHandle->order)) {

@ -1524,18 +1513,54 @@ SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) {
.tid = pTable->tableId.tid,
.rows = pHandle->cur.rows,
.window = pHandle->cur.win,
.numOfCols = QH_GET_NUM_OF_COLS(pHandle),
};
return blockInfo;
}
}
// return null for data block in cache
/*
* return null for mixed data block, if not a complete file data block, the statistics value will always return NULL
*/
int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataStatis** pBlockStatis) {
STsdbQueryHandle* pHandle = (STsdbQueryHandle*) pQueryHandle;
SQueryFilePos* cur = &pHandle->cur;
if (cur->mixBlock) {
*pBlockStatis = NULL;
return TSDB_CODE_SUCCESS;
}
assert((cur->slot >= 0 && cur->slot < pHandle->numOfBlocks) ||
((cur->slot == pHandle->numOfBlocks) && (cur->slot == 0)));
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[cur->slot];
tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL);
size_t numOfCols = QH_GET_NUM_OF_COLS(pHandle);
for(int32_t i = 0; i < numOfCols; ++i) {
SDataStatis* st = &pHandle->statis[i];
int32_t colId = st->colId;
memset(st, 0, sizeof(SDataStatis));
st->colId = colId;
}
tsdbGetDataStatis(&pHandle->rhelper, pHandle->statis, numOfCols);
*pBlockStatis = pHandle->statis;
//update the number of NULL data rows
for(int32_t i = 0; i < numOfCols; ++i) {
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
}
}
return TSDB_CODE_SUCCESS;
}
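
A note for reviewers of this hunk: tsdbRetrieveDataBlockStatisInfo only returns pre-computed column statistics for a complete, unmixed file block; for a mixed block it sets *pBlockStatis to NULL and the caller is expected to load the actual rows instead. A minimal caller-side sketch of that fallback, assuming a valid query handle and column-id list already exist (illustrative only, not the engine's actual call site):

// assumed to exist: TsdbQueryHandleT* pQueryHandle; SArray* pIdList (requested column ids)
SDataStatis* pStatis = NULL;
tsdbRetrieveDataBlockStatisInfo(pQueryHandle, &pStatis);

if (pStatis == NULL) {
  // mixed block or no statistics: fall back to the real block data
  SArray* pCols = tsdbRetrieveDataBlock(pQueryHandle, pIdList);
  // ... evaluate filters/aggregates against pCols ...
} else {
  // complete file block: numOfNull has been normalized above, so the
  // per-column statistics can be used without touching the block data
}
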
SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
/**
* In the following two cases, the data has been loaded to SColumnInfoData.

@ -1546,13 +1571,13 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
if (pHandle->cur.fid < 0) {
return pHandle->pColumns;
} else {
STableBlockInfo* pBlockInfoEx = &pHandle->pDataBlockInfo[pHandle->cur.slot];
STableCheckInfo* pCheckInfo = pBlockInfoEx->pTableCheckInfo;
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[pHandle->cur.slot];
STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo;
if (pHandle->cur.mixBlock) {
return pHandle->pColumns;
} else {
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlockInfoEx->pBlock.compBlock);
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlockInfo->compBlock);
assert(pHandle->realNumOfRows <= binfo.rows);
// data block has been loaded, todo extract method

@ -1562,11 +1587,11 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
pBlockLoadInfo->tid == pCheckInfo->pTableObj->tableId.tid) {
return pHandle->pColumns;
} else { // only load the file block
SCompBlock* pBlock = pBlockInfoEx->pBlock.compBlock;
SCompBlock* pBlock = pBlockInfo->compBlock;
doLoadFileDataBlock(pHandle, pBlock, pCheckInfo);
// todo refactor
int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfPoints - 1);
int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfRows - 1);
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (!ASCENDING_TRAVERSE(pHandle->order) && numOfRows < pHandle->outputCapacity) {

@ -1669,7 +1694,7 @@ void filterPrepare(void* expr, void* param) {
tVariant* pCond = pExpr->_node.pRight->pVal;
SSchema* pSchema = pExpr->_node.pLeft->pSchema;
// todo : if current super table does not change schema yet, this function may failed, add test case
// todo : if current super table does not change schema yet, this function may fail to get correct schema, test case
int32_t index = getTagColumnIndex(pTSSchema, pSchema);
assert((index >= 0 && i < TSDB_MAX_TAGS) || (index == TSDB_TBNAME_COLUMN_INDEX));

@ -2006,8 +2031,9 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
}
taosArrayDestroy(pQueryHandle->pColumns);
tfree(pQueryHandle->pDataBlockInfo);
tfree(pQueryHandle->statis);
tsdbDestroyHelper(&pQueryHandle->rhelper);
tfree(pQueryHandle);

@ -129,7 +129,7 @@ void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*));
* sort string array
* @param pArray
*/
void taosArraySortString(SArray* pArray);
void taosArraySortString(SArray* pArray, __compar_fn_t comparFn);
/**
* search the array

@ -137,14 +137,14 @@ void taosArraySortString(SArray* pArray);
* @param compar
* @param key
*/
void* taosArraySearch(const SArray* pArray, int (*compar)(const void*, const void*), const void* key);
void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn);
/**
* search the array
* @param pArray
* @param key
*/
char* taosArraySearchString(const SArray* pArray, const char* key);
char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn);
#ifdef __cplusplus
}

@ -34,16 +34,18 @@ typedef struct SPatternCompareInfo {
char matchOne; // symbol for match one wildcard, default: '_'
} SPatternCompareInfo;
int patternMatch(const char *zPattern, const char *zString, size_t size, const SPatternCompareInfo *pInfo);
int patternMatch(const char *pattern, const char *str, size_t size, const SPatternCompareInfo *pInfo);
int WCSPatternMatch(const wchar_t *zPattern, const wchar_t *zString, size_t size, const SPatternCompareInfo *pInfo);
int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, const SPatternCompareInfo *pInfo);
int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size);
int32_t doCompare(const char* a, const char* b, int32_t type, size_t size);
__compar_fn_t getKeyComparFunc(int32_t keyType);
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
int32_t taosArrayCompareString(const void* a, const void* b);
#ifdef __cplusplus
}
#endif

@ -197,30 +197,23 @@ void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*)) {
qsort(pArray->pData, pArray->size, pArray->elemSize, compar);
}
void* taosArraySearch(const SArray* pArray, int (*compar)(const void*, const void*), const void* key) {
assert(pArray != NULL);
assert(compar != NULL);
void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn) {
assert(pArray != NULL && comparFn != NULL);
assert(key != NULL);
return bsearch(key, pArray->pData, pArray->size, pArray->elemSize, compar);
return bsearch(key, pArray->pData, pArray->size, pArray->elemSize, comparFn);
}
static int taosArrayCompareString(const void* a, const void* b) {
const char* x = *(const char**)a;
const char* y = *(const char**)b;
return strcmp(x, y);
}
void taosArraySortString(SArray* pArray) {
void taosArraySortString(SArray* pArray, __compar_fn_t comparFn) {
assert(pArray != NULL);
qsort(pArray->pData, pArray->size, pArray->elemSize, taosArrayCompareString);
qsort(pArray->pData, pArray->size, pArray->elemSize, comparFn);
}
char* taosArraySearchString(const SArray* pArray, const char* key) {
char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn) {
assert(pArray != NULL);
assert(key != NULL);
void* p = bsearch(&key, pArray->pData, pArray->size, pArray->elemSize, taosArrayCompareString);
void* p = bsearch(&key, pArray->pData, pArray->size, pArray->elemSize, comparFn);
if (p == NULL) {
return NULL;
}
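
For reviewers, a small sketch of what the new comparator-based signatures look like from the caller's side; taosArrayInit/taosArrayPush/taosArrayDestroy are assumed to keep their usual behavior from this module, and cmpInt32 is purely illustrative:

// assumes the usual tarray.h / assert.h includes
static int cmpInt32(const void* a, const void* b) {
  int32_t x = *(const int32_t*)a;
  int32_t y = *(const int32_t*)b;
  return (x < y) ? -1 : (x > y);
}

static void exampleSearch() {
  SArray* list = taosArrayInit(4, sizeof(int32_t));
  for (int32_t v = 0; v < 4; ++v) {   // pushed in ascending order, so the array stays sorted for bsearch
    taosArrayPush(list, &v);
  }

  int32_t key = 2;
  int32_t* found = taosArraySearch(list, &key, cmpInt32);   // the caller now supplies the comparator
  assert(found != NULL && *found == 2);

  taosArrayDestroy(list);
}

String arrays follow the same pattern: callers such as compareFindStrInArray (below) now pass taosArrayCompareString explicitly to taosArraySearchString instead of relying on a comparator hard-coded inside tarray.c.
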
@ -227,9 +227,16 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
int32_t taosArrayCompareString(const void* a, const void* b) {
const char* x = *(const char**)a;
const char* y = *(const char**)b;
return compareLenPrefixedStr(x, y);
}
static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) {
const SArray* arr = (const SArray*) pRight;
return taosArraySearchString(arr, pLeft) == NULL ? 0 : 1;
return taosArraySearchString(arr, pLeft, taosArrayCompareString) == NULL ? 0 : 1;
}
static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {

@ -29,7 +29,8 @@ class TDTestCase:
|
|||
tdSql.prepare()
|
||||
|
||||
tdLog.info("=============== step1")
|
||||
tdSql.execute('create table tb (ts timestamp, speed int, temp float, note binary(5), flag bool)')
|
||||
tdSql.execute(
|
||||
'create table tb (ts timestamp, speed int, temp float, note binary(5), flag bool)')
|
||||
|
||||
numOfRecords = 0
|
||||
randomList = [10, 50, 100, 500, 1000, 5000]
|
||||
|
@ -38,9 +39,11 @@ class TDTestCase:
|
|||
tdLog.info("will insert %d records" % num)
|
||||
for x in range(0, num):
|
||||
tdLog.info(
|
||||
'insert into tb values (now + %da, NULL, NULL, NULL, TRUE)' % x)
|
||||
'insert into tb values (now + %da, NULL, NULL, NULL, TRUE)' %
|
||||
x)
|
||||
tdSql.execute(
|
||||
'insert into tb values (now + %da, NULL, NULL, NULL, TRUE)' % x)
|
||||
'insert into tb values (now + %da, NULL, NULL, NULL, TRUE)' %
|
||||
x)
|
||||
|
||||
numOfRecords = numOfRecords + num
|
||||
|
||||
|
@ -54,7 +57,6 @@ class TDTestCase:
|
|||
tdDnodes.start(1)
|
||||
tdLog.sleep(5)
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
|
|
@ -0,0 +1,67 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.info("=============== step1")
|
||||
tdSql.execute(
|
||||
'create table tb (ts timestamp, speed int, temp float, note binary(4000), flag bool)')
|
||||
|
||||
numOfRecords = 1000000
|
||||
dividend = 1000
|
||||
tdLog.info("will insert %d records" % numOfRecords)
|
||||
|
||||
ts = 1500000000000
|
||||
for i in range(0, numOfRecords):
|
||||
|
||||
if (i % dividend):
|
||||
print(".", end="")
|
||||
tdSql.execute(
|
||||
'insert into tb values (%d + %da, NULL, NULL, NULL, TRUE)' %
|
||||
(ts, i))
|
||||
else:
|
||||
print("a", end="")
|
||||
tdSql.execute(
|
||||
'insert into tb values (%d + %da, NULL, NULL, "a", FALSE)' %
|
||||
(ts, i))
|
||||
|
||||
tdSql.query("select * from tb")
|
||||
tdSql.checkRows(numOfRecords)
|
||||
tdSql.checkData(numOfRecords - dividend, 3, 'a')
|
||||
tdSql.checkData(numOfRecords - dividend - 1, 3, None)
|
||||
|
||||
tdLog.info("stop dnode to commit data to disk")
|
||||
tdDnodes.stop(1)
|
||||
tdLog.info("dnodes:%d size is %d" % (1, tdDnodes.getDataSize(1)))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -121,6 +121,7 @@ class Test:
|
|||
tdDnodes.start(1)
|
||||
tdSql.prepare()
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
|
|
|
@ -98,12 +98,16 @@ class TDTestCase:
|
|||
# create a super table with name exceed max length
|
||||
sname = self.generateString(maxTableNameLen + 1)
|
||||
tdLog.info("create a super table with length %d" % len(sname))
|
||||
tdSql.error("create table %s (ts timestamp, value int) tags(id int)" % sname)
|
||||
tdSql.error(
|
||||
"create table %s (ts timestamp, value int) tags(id int)" %
|
||||
sname)
|
||||
|
||||
# create a super table with name of max length
|
||||
sname = self.generateString(maxTableNameLen)
|
||||
tdLog.info("create a super table with length %d" % len(sname))
|
||||
tdSql.execute("create table %s (ts timestamp, value int) tags(id int)" % sname)
|
||||
tdSql.execute(
|
||||
"create table %s (ts timestamp, value int) tags(id int)" %
|
||||
sname)
|
||||
tdLog.info("check table count, should be one")
|
||||
tdSql.query('show stables')
|
||||
tdSql.checkRows(1)
|
||||
|
|
|
@ -99,6 +99,19 @@ class TDDnode:
|
|||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
if (self.deployed == 1):
|
||||
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||
for f in filenames:
|
||||
fp = os.path.join(dirpath, f)
|
||||
|
||||
if not os.path.islink(fp):
|
||||
totalSize = totalSize + os.path.getsize(fp)
|
||||
|
||||
return totalSize
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = "%s/pysim/dnode%d/log" % (self.path, self.index)
|
||||
self.dataDir = "%s/pysim/dnode%d/data" % (self.path, self.index)
|
||||
|
@ -384,6 +397,10 @@ class TDDnodes:
|
|||
self.check(index)
|
||||
self.dnodes[index - 1].stop()
|
||||
|
||||
def getDataSize(self, index):
|
||||
self.check(index)
|
||||
return self.dnodes[index - 1].getDataSize()
|
||||
|
||||
def forcestop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].forcestop()
|
||||
|
|
|
@ -104,14 +104,14 @@ $replica = 1 # max=3
|
|||
$days = 10
|
||||
$keep = 365
|
||||
$rows_db = 1000
|
||||
$cache = 4096 # 4 kb
|
||||
$cache = 16 # 16MB
|
||||
$ablocks = 100
|
||||
$tblocks = 32 # max=512, automatically trimmed when exceeding
|
||||
$ctime = 36000 # 10 hours
|
||||
$wal = 0 # valid value is 0, 1, 2
|
||||
$comp = 1 # max=32, automatically trimmed when exceeding
|
||||
|
||||
sql create database $db replica $replica days $days keep $keep maxrows $rows_db cache $cache ablocks $ablocks tblocks $tblocks ctime $ctime wal $wal comp $comp
|
||||
sql create database $db replica $replica days $days keep $keep maxrows $rows_db cache $cache ctime $ctime wal $wal comp $comp
|
||||
sql show databases
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
|
|
|
@ -27,32 +27,25 @@ endi
|
|||
if $data00 != @18-09-17 08:59:00.000@ then
|
||||
return -1
|
||||
endi
|
||||
#if $data01 != NULL then
|
||||
if $data01 != 0 then
|
||||
return -1
|
||||
endi
|
||||
#if $data02 != NULL then
|
||||
if $data02 != 0 then
|
||||
return -1
|
||||
endi
|
||||
#if $data03 != NULL then
|
||||
print data03 = $data03
|
||||
if $data03 != 0.00000 then
|
||||
return -1
|
||||
endi
|
||||
#if $data04 != NULL then
|
||||
if $data04 != 0.000000000 then
|
||||
return -1
|
||||
endi
|
||||
#if $data05 != NULL then
|
||||
if $data05 != 0 then
|
||||
return -1
|
||||
endi
|
||||
#if $data06 != NULL then
|
||||
if $data06 != 0 then
|
||||
return -1
|
||||
endi
|
||||
#if $data07 != NULL then
|
||||
if $data07 != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
|
|
@ -36,6 +36,7 @@ if $data01 != 2 then
|
|||
return -1
|
||||
endi
|
||||
if $data02 != tb2 then
|
||||
print expect tb2, actual: $data02
|
||||
return -1
|
||||
endi
|
||||
if $data03 != tb2 then
|
||||
|
|
|
@ -177,7 +177,7 @@ sql_error insert into st34 using mt3 tags ('NULL', '123aBc', 105, NULL) values
|
|||
#### case 3: set tag value
|
||||
sql create table mt4 (ts timestamp, c1 int) tags (tag_binary binary(16), tag_nchar nchar(16), tag_int int, tag_bool bool, tag_float float, tag_double double)
|
||||
sql create table st41 using mt4 tags ("beijing", 'nchar_tag', 100, false, 9.12345, 7.123456789)
|
||||
sql select tag_binary, tag_nchar, tag_int, tag_bool, tag_float, tag_double st41
|
||||
sql select tag_binary, tag_nchar, tag_int, tag_bool, tag_float, tag_double from st41
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
@ -190,13 +190,17 @@ endi
|
|||
if $data02 != 100 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != false then
|
||||
if $data03 != 0 then
|
||||
return -1
|
||||
endi
|
||||
if $dat04 != 9.123450 then
|
||||
|
||||
if $data04 != 9.12345 then
|
||||
print expect 9.12345 , actual: $data04
|
||||
return -1
|
||||
endi
|
||||
if $data05 != 7.123457 then
|
||||
|
||||
if $data05 != 7.123456789 then
|
||||
print expect 7.123456789 , actual: $data05
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -133,7 +133,7 @@ if $rows != 4007 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data00 != @70-01-01 08:01:43.499@ then
|
||||
if $data00 != @70-01-01 08:01:43.500@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -143,7 +143,7 @@ if $rows != 3907 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data00 != @70-01-01 08:01:43.399@ then
|
||||
if $data00 != @70-01-01 08:01:43.488@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -152,7 +152,7 @@ if $rows != 3106 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data00 != @70-01-01 08:01:43.099@ then
|
||||
if $data00 != @70-01-01 08:01:43.388@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -161,7 +161,7 @@ if $rows != 3608 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data00 != @70-01-01 08:01:43.100@ then
|
||||
if $data00 != @70-01-01 08:01:43.450@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -358,6 +358,15 @@ if $row != 8 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql select diff(k) from tm0
|
||||
if $row != 3 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data21 != -1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
#error sql
|
||||
sql_error select * from 1;
|
||||
sql_error select 1;
|
||||
|
@ -371,8 +380,11 @@ sql_error select 1 interval(1h);
|
|||
sql_error select count(*);
|
||||
sql_error select sum(k);
|
||||
sql_error select 'abc';
|
||||
sql_error select k+1,sum(k) from tm0;
|
||||
sql_error select k, sum(k) from tm0;
|
||||
sql_error select k, sum(k)+1 from tm0;
|
||||
|
||||
#=============================tbase-1205
|
||||
sql select count(*) from tm1 where ts<now and ts>= now -1d interval(1h) fill(NULL);
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ $db = $dbPrefix . $i
|
|||
$stb = $stbPrefix . $i
|
||||
|
||||
sql drop database if exists $db
|
||||
sql create database $db maxrows 200 cache 1024 tblocks 200 maxTables 4
|
||||
sql create database $db maxrows 200 cache 16 maxTables 4
|
||||
print ====== create tables
|
||||
sql use $db
|
||||
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 binary(15), t2 int, t3 bigint, t4 nchar(10), t5 double, t6 bool)
|
||||
|
@ -64,7 +64,7 @@ print ====== $db tables created
|
|||
|
||||
$db = $dbPrefix . 1
|
||||
sql drop database if exists $db
|
||||
sql create database $db maxrows 200 cache 1024
|
||||
sql create database $db maxrows 200 cache 16
|
||||
sql use $db
|
||||
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 binary(15), t2 int, t3 bigint, t4 nchar(10), t5 double, t6 bool)
|
||||
|
||||
|
|
|
@ -56,15 +56,17 @@ sleep 2000
|
|||
run general/parser/limit1_tblocks100.sim
|
||||
sleep 2000
|
||||
run general/parser/select_across_vnodes.sim
|
||||
sleep 2000
|
||||
run general/parser/slimit1.sim
|
||||
sleep 2000
|
||||
run general/parser/tbnameIn.sim
|
||||
sleep 2000
|
||||
run general/parser/projection_limit_offset.sim
|
||||
|
||||
sleep 2000
|
||||
run general/parser/limit2.sim
|
||||
sleep 2000
|
||||
run general/parser/tbnameIn.sim
|
||||
sleep 2000
|
||||
run general/parser/slimit.sim
|
||||
sleep 2000
|
||||
run general/parser/slimit1.sim
|
||||
|
||||
sleep 2000
|
||||
run general/parser/fill.sim
|
||||
|
@ -94,8 +96,7 @@ sleep 2000
|
|||
run general/parser/join.sim
|
||||
sleep 2000
|
||||
run general/parser/join_multivnode.sim
|
||||
sleep 2000
|
||||
run general/parser/projection_limit_offset.sim
|
||||
|
||||
sleep 2000
|
||||
run general/parser/select_with_tags.sim
|
||||
sleep 2000
|
||||
|
|
|
@ -109,9 +109,9 @@ cd ../../../debug; make
|
|||
./test.sh -f general/parser/auto_create_tb.sim
|
||||
./test.sh -f general/parser/auto_create_tb_drop_tb.sim
|
||||
./test.sh -f general/parser/col_arithmetic_operation.sim
|
||||
#/test.sh -f general/parser/columnValue.sim
|
||||
./test.sh -f general/parser/columnValue.sim
|
||||
./test.sh -f general/parser/commit.sim
|
||||
# ./test.sh -f general/parser/create_db.sim
|
||||
#./test.sh -f general/parser/create_db.sim #there are bugs in this sim script
|
||||
./test.sh -f general/parser/create_mt.sim
|
||||
./test.sh -f general/parser/create_tb.sim
|
||||
./test.sh -f general/parser/dbtbnameValidate.sim
|
||||
|
@ -122,38 +122,38 @@ cd ../../../debug; make
|
|||
./test.sh -f general/parser/first_last.sim
|
||||
# ./test.sh -f general/parser/import_file.sim
|
||||
./test.sh -f general/parser/lastrow.sim
|
||||
# ./test.sh -f general/parser/nchar.sim
|
||||
# ./test.sh -f general/parser/null_char.sim
|
||||
./test.sh -f general/parser/nchar.sim
|
||||
./test.sh -f general/parser/null_char.sim
|
||||
./test.sh -f general/parser/single_row_in_tb.sim
|
||||
./test.sh -f general/parser/select_from_cache_disk.sim
|
||||
./test.sh -f general/parser/limit.sim
|
||||
# ./test.sh -f general/parser/fill.sim
|
||||
# ./test.sh -f general/parser/fill_stb.sim
|
||||
# ./test.sh -f general/parser/tags_dynamically_specifiy.sim
|
||||
# ./test.sh -f general/parser/interp.sim
|
||||
./test.sh -f general/parser/limit1.sim
|
||||
./test.sh -f general/parser/limit1_tblocks100.sim
|
||||
# ./test.sh -f general/parser/limit2.sim
|
||||
./test.sh -f general/parser/mixed_blocks.sim
|
||||
./test.sh -f general/parser/selectResNum.sim
|
||||
./test.sh -f general/parser/select_across_vnodes.sim
|
||||
# ./test.sh -f general/parser/set_tag_vals.sim
|
||||
# ./test.sh -f general/parser/slimit.sim
|
||||
./test.sh -f general/parser/slimit1.sim
|
||||
./test.sh -f general/parser/tbnameIn.sim
|
||||
./test.sh -f general/parser/binary_escapeCharacter.sim
|
||||
./test.sh -f general/parser/projection_limit_offset.sim
|
||||
# ./test.sh -f general/parser/limit2.sim
|
||||
# ./test.sh -f general/parser/slimit.sim
|
||||
# ./test.sh -f general/parser/fill.sim
|
||||
# ./test.sh -f general/parser/fill_stb.sim
|
||||
# ./test.sh -f general/parser/interp.sim
|
||||
# ./test.sh -f general/parser/where.sim
|
||||
# ./test.sh -f general/parser/join.sim
|
||||
# ./test.sh -f general/parser/join_multivnode.sim
|
||||
# ./test.sh -f general/parser/select_with_tags.sim
|
||||
# ./test.sh -f general/parser/groupby.sim
|
||||
# ./test.sh -f general/parser/bug.sim
|
||||
#unsupport ./test.sh -f general/parser/tags_dynamically_specifiy.sim
|
||||
#unsupport ./test.sh -f general/parser/set_tag_vals.sim
|
||||
#unsupport ./test.sh -f general/parser/repeatAlter.sim
|
||||
#unsupport ./test.sh -f general/parser/slimit_alter_tags.sim
|
||||
#unsupport ./test.sh -f general/parser/stream_on_sys.sim
|
||||
#unsupport ./test.sh -f general/parser/stream.sim
|
||||
# ./test.sh -f general/parser/tbnameIn.sim
|
||||
# ./test.sh -f general/parser/where.sim
|
||||
# ./test.sh -f general/parser/repeatAlter.sim
|
||||
#unsupport ./test.sh -f general/parser/repeatStream.sim
|
||||
# ./test.sh -f general/parser/join.sim
|
||||
# ./test.sh -f general/parser/join_multivnode.sim
|
||||
# ./test.sh -f general/parser/projection_limit_offset.sim
|
||||
# ./test.sh -f general/parser/select_with_tags.sim
|
||||
# ./test.sh -f general/parser/groupby.sim
|
||||
./test.sh -f general/parser/binary_escapeCharacter.sim
|
||||
#./test.sh -f general/parser/bug.sim
|
||||
|
||||
./test.sh -f general/stable/disk.sim
|
||||
./test.sh -f general/stable/dnode3.sim
|
||||
|
|
|
@ -96,6 +96,7 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG
|
|||
echo "serverPort ${NODE}" >> $TAOS_CFG
|
||||
echo "dataDir $DATA_DIR" >> $TAOS_CFG
|
||||
echo "logDir $LOG_DIR" >> $TAOS_CFG
|
||||
echo "debugFlag 135" >> $TAOS_CFG
|
||||
echo "dDebugFlag 135" >> $TAOS_CFG
|
||||
echo "mDebugFlag 135" >> $TAOS_CFG
|
||||
echo "sdbDebugFlag 135" >> $TAOS_CFG
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
#!/bin/sh
|
||||
|
||||
# if [ $# != 2 || $# != 3 ]; then
|
||||
# echo "argument list need input : "
|
||||
# echo " -s start/stop"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
EXEC_OPTON=
|
||||
while getopts "n:s:u:x:ct" arg
|
||||
do
|
||||
case $arg in
|
||||
n)
|
||||
NODE_NAME=$OPTARG
|
||||
;;
|
||||
s)
|
||||
EXEC_OPTON=$OPTARG
|
||||
;;
|
||||
c)
|
||||
CLEAR_OPTION="clear"
|
||||
;;
|
||||
t)
|
||||
SHELL_OPTION="true"
|
||||
;;
|
||||
u)
|
||||
USERS=$OPTARG
|
||||
;;
|
||||
x)
|
||||
SIGNAL=$OPTARG
|
||||
;;
|
||||
?)
|
||||
echo "unkown argument"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
SCRIPT_DIR=`dirname $0`
|
||||
cd $SCRIPT_DIR/../
|
||||
SCRIPT_DIR=`pwd`
|
||||
|
||||
cd ../../
|
||||
TAOS_DIR=`pwd`
|
||||
|
||||
BUILD_DIR=$TAOS_DIR/../debug/build
|
||||
SIM_DIR=$TAOS_DIR/sim
|
||||
NODE_DIR=$SIM_DIR/arbitrator
|
||||
EXE_DIR=$BUILD_DIR/bin
|
||||
LOG_DIR=$NODE_DIR/log
|
||||
|
||||
echo "------------ $EXEC_OPTON tarbitrator"
|
||||
|
||||
if [ "$EXEC_OPTON" = "start" ]; then
|
||||
echo "------------ log path: $LOG_DIR"
|
||||
nohup $EXE_DIR/tarbitrator -p 8000 -d 135 -g $LOG_DIR > /dev/null 2>&1 &
|
||||
else
|
||||
#relative path
|
||||
PID=`ps -ef|grep tarbitrator | grep -v grep | awk '{print $2}'`
|
||||
if [ -n "$PID" ]; then
|
||||
sudo kill -9 $PID
|
||||
sudo pkill -9 tarbitrator
|
||||
fi
|
||||
fi
|
||||
|
|
@ -13,3 +13,12 @@ while [ -n "$PID" ]; do
|
|||
fuser -k -n tcp 6030
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]; do
|
||||
echo kill -9 $PID
|
||||
pkill -9 tarbitrator
|
||||
fuser -k -n tcp 6040
|
||||
PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
|
|
|
@ -62,6 +62,11 @@ CFG_DIR=$PRG_DIR/cfg
|
|||
LOG_DIR=$PRG_DIR/log
|
||||
DATA_DIR=$PRG_DIR/data
|
||||
|
||||
|
||||
ARBITRATOR_PRG_DIR=$SIM_DIR/arbitrator
|
||||
ARBITRATOR_LOG_DIR=$ARBITRATOR_PRG_DIR/log
|
||||
|
||||
|
||||
chmod -R 777 $PRG_DIR
|
||||
echo "------------------------------------------------------------------------"
|
||||
echo "Start TDengine Testing Case ..."
|
||||
|
@ -72,9 +77,12 @@ echo "CFG_DIR : $CFG_DIR"
|
|||
|
||||
rm -rf $LOG_DIR
|
||||
rm -rf $CFG_DIR
|
||||
rm -rf $ARBITRATOR_LOG_DIR
|
||||
|
||||
mkdir -p $PRG_DIR
|
||||
mkdir -p $LOG_DIR
|
||||
mkdir -p $CFG_DIR
|
||||
mkdir -p $ARBITRATOR_LOG_DIR
|
||||
|
||||
TAOS_CFG=$PRG_DIR/cfg/taos.cfg
|
||||
touch -f $TAOS_CFG
|
||||
|
|
|
@ -0,0 +1,100 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/deploy.sh -n dnode3 -i 3
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 2
|
||||
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 2
|
||||
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 2
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode2 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode3 -c walLevel -v 1
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
|
||||
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
|
||||
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
|
||||
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
|
||||
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
|
||||
|
||||
print ============== step0: start tarbitrator
|
||||
system sh/exec_tarbitrator.sh -s start
|
||||
|
||||
|
||||
print ============== step1: replica is 1, and start 1 dnode
|
||||
system sh/exec_up.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
$db = replica_db1
|
||||
sql create database $db replica 1 maxTables 4
|
||||
sql use $db
|
||||
|
||||
# create table , insert data
|
||||
$stb = repl_stb
|
||||
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
|
||||
$rowNum = 10
|
||||
$tblNum = 12
|
||||
|
||||
$ts0 = 1420041600000
|
||||
$ts = $ts0
|
||||
$delta = 1
|
||||
|
||||
$i = 0
|
||||
while $i < $tblNum
|
||||
$tb = tb . $i
|
||||
sql create table $tb using $stb tags( $i )
|
||||
|
||||
$x = 0
|
||||
while $x < $rowNum
|
||||
$xs = $x * $delta
|
||||
$ts = $ts0 + $xs
|
||||
sql insert into $tb values ( $ts , $x )
|
||||
$x = $x + 1
|
||||
endw
|
||||
$i = $i + 1
|
||||
endw
|
||||
|
||||
print ============== step2: add 1 new dnode, expect balanced
|
||||
system sh/exec_up.sh -n dnode2 -s start
|
||||
sql create dnode $hostname2
|
||||
|
||||
# expect after balanced, 2 vondes in dnode1, 1 vonde in dnode2
|
||||
$x = 0
|
||||
show2:
|
||||
$x = $x + 1
|
||||
sleep 2000
|
||||
if $x == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show dnodes
|
||||
print dnode1 openVnodes $data2_1
|
||||
print dnode2 openVnodes $data2_2
|
||||
if $data2_1 != 2 then
|
||||
goto show2
|
||||
endi
|
||||
if $data2_2 != 1 then
|
||||
goto show2
|
||||
endi
|
||||
|
||||
print ============== step4: stop dnode1, and wait dnode2 master
|
||||
system sh/exec_up.sh -n dnode1 -s stop
|
||||
|
||||
$x = 0
|
||||
loop_wait:
|
||||
$x = $x + 1
|
||||
sleep 2000
|
||||
if $x == 10 then
|
||||
print ERROR: after dnode1 stop, dnode2 didn't become a master!
|
||||
return -1
|
||||
endi
|
||||
sql show mnodes
|
||||
$dnodeRole = $data2_1
|
||||
print dnodeRole ==> $dnodeRole
|
||||
|
||||
if $dnodeRole != master then
|
||||
goto loop_wait
|
||||
endi
|
|
@ -47,6 +47,7 @@ print should not drop master
|
|||
|
||||
print ============== step4
|
||||
system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
|
||||
sleep 2000
|
||||
sql_error show mnodes
|
||||
print error of no master
|
||||
|
||||
|
|
|
@ -42,6 +42,7 @@ endi
|
|||
|
||||
print ============== step3
|
||||
system sh/exec_up.sh -n dnode1 -s stop
|
||||
sleep 2000
|
||||
sql_error show mnodes
|
||||
|
||||
print ============== step4
|
||||
|
|
|
@ -36,12 +36,17 @@ void simLogSql(char *sql) {
|
|||
fflush(fp);
|
||||
}
|
||||
|
||||
char *simParseArbitratorName(char *varName);
|
||||
char *simParseHostName(char *varName);
|
||||
char *simGetVariable(SScript *script, char *varName, int varLen) {
|
||||
if (strncmp(varName, "hostname", 8) == 0) {
|
||||
return simParseHostName(varName);
|
||||
}
|
||||
|
||||
if (strncmp(varName, "arbitrator", 10) == 0) {
|
||||
return simParseArbitratorName(varName);
|
||||
}
|
||||
|
||||
if (strncmp(varName, "error", varLen) == 0) return script->error;
|
||||
|
||||
if (strncmp(varName, "rows", varLen) == 0) return script->rows;
|
||||
|
|
|
@ -29,6 +29,12 @@ int simDebugFlag = 135;
|
|||
void simCloseTaosdConnect(SScript *script);
|
||||
char simHostName[128];
|
||||
|
||||
char *simParseArbitratorName(char *varName) {
|
||||
static char hostName[140];
|
||||
sprintf(hostName, "%s:%d", simHostName, 8000);
|
||||
return hostName;
|
||||
}
|
||||
|
||||
char *simParseHostName(char *varName) {
|
||||
static char hostName[140];