Merge branch 'develop' into feature/TD-3188
This commit is contained in:
commit
2ac4aed2aa
|
@ -46,6 +46,7 @@ def pre_test(){
|
|||
git fetch origin +refs/pull/${CHANGE_ID}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile' || exit 0
|
||||
find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
|
||||
cd ${WK}
|
||||
git reset --hard HEAD~10
|
||||
git checkout develop
|
||||
|
@ -115,7 +116,6 @@ pipeline {
|
|||
sh '''
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
find pytest -name '*'sql|xargs rm -rf
|
||||
./test-all.sh p1
|
||||
date'''
|
||||
}
|
||||
|
@ -131,7 +131,6 @@ pipeline {
|
|||
sh '''
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
find pytest -name '*'sql|xargs rm -rf
|
||||
./test-all.sh p2
|
||||
date'''
|
||||
}
|
||||
|
|
|
@ -123,6 +123,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
|
|||
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
|
||||
|
||||
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
|
@ -133,6 +134,7 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
|
|||
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
bool tscQueryTags(SQueryInfo* pQueryInfo);
|
||||
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo);
|
||||
|
||||
SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
|
||||
SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
|
||||
|
@ -152,7 +154,6 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F
|
|||
SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index);
|
||||
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
|
||||
|
||||
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
|
||||
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
|
||||
|
||||
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
|
||||
|
|
|
@ -198,9 +198,10 @@ typedef struct STableDataBlocks {
|
|||
typedef struct SQueryInfo {
|
||||
int16_t command; // the command may be different for each subclause, so keep it seperately.
|
||||
uint32_t type; // query/insert type
|
||||
STimeWindow window; // the whole query time window
|
||||
|
||||
STimeWindow window; // query time window
|
||||
SInterval interval;
|
||||
SInterval interval; // tumble time window
|
||||
SSessionWindow sessionWindow; // session time window
|
||||
|
||||
SSqlGroupbyExpr groupbyExpr; // group by tags info
|
||||
SArray * colList; // SArray<SColumn*>
|
||||
|
@ -232,6 +233,7 @@ typedef struct SQueryInfo {
|
|||
typedef struct {
|
||||
int command;
|
||||
uint8_t msgType;
|
||||
char reserve1[3]; // fix bus error on arm32
|
||||
bool autoCreated; // create table if it is not existed during retrieve table meta in mnode
|
||||
|
||||
union {
|
||||
|
@ -244,8 +246,10 @@ typedef struct {
|
|||
|
||||
char * curSql; // current sql, resume position of sql after parsing paused
|
||||
int8_t parseFinished;
|
||||
char reserve2[3]; // fix bus error on arm32
|
||||
|
||||
int16_t numOfCols;
|
||||
char reserve3[2]; // fix bus error on arm32
|
||||
uint32_t allocSize;
|
||||
char * payload;
|
||||
int32_t payloadLen;
|
||||
|
@ -255,7 +259,9 @@ typedef struct {
|
|||
int32_t numOfParams;
|
||||
|
||||
int8_t dataSourceType; // load data from file or not
|
||||
char reserve4[3]; // fix bus error on arm32
|
||||
int8_t submitSchema; // submit block is built with table schema
|
||||
char reserve5[3]; // fix bus error on arm32
|
||||
STagData tagData; // NOTE: pTagData->data is used as a variant length array
|
||||
|
||||
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
|
||||
|
@ -397,7 +403,6 @@ typedef struct SSqlStream {
|
|||
|
||||
void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable);
|
||||
|
||||
|
||||
int tscAcquireRpc(const char *key, const char *user, const char *secret,void **pRpcObj);
|
||||
void tscReleaseRpc(void *param);
|
||||
void tscInitMsgsFp();
|
||||
|
|
|
@ -100,6 +100,10 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr
|
|||
} else if (functionId == TSDB_FUNC_APERCT) {
|
||||
pCtx->param[0].i64 = pExpr->param[0].i64;
|
||||
pCtx->param[0].nType = pExpr->param[0].nType;
|
||||
} else if (functionId == TSDB_FUNC_BLKINFO) {
|
||||
pCtx->param[0].i64 = pExpr->param[0].i64;
|
||||
pCtx->param[0].nType = pExpr->param[0].nType;
|
||||
pCtx->numOfParams = 1;
|
||||
}
|
||||
|
||||
pCtx->interBufBytes = pExpr->interBytes;
|
||||
|
@ -951,10 +955,10 @@ static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutp
|
|||
// todo extract function
|
||||
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
|
||||
|
||||
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
|
||||
void** pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalMerge->resColModel->capacity);
|
||||
pResPages[i] = calloc(1, pField->bytes * pLocalMerge->resColModel->capacity);
|
||||
}
|
||||
|
||||
while (1) {
|
||||
|
@ -966,7 +970,7 @@ static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutp
|
|||
if (pQueryInfo->limit.offset > 0) {
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset,
|
||||
memmove(pResPages[i], ((char*)pResPages[i]) + pField->bytes * pQueryInfo->limit.offset,
|
||||
(size_t)(newRows * pField->bytes));
|
||||
}
|
||||
}
|
||||
|
@ -1010,7 +1014,7 @@ static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutp
|
|||
int32_t offset = 0;
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
|
||||
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i], (size_t)(pField->bytes * pRes->numOfRows));
|
||||
offset += pField->bytes;
|
||||
}
|
||||
|
||||
|
|
|
@ -307,7 +307,8 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
|
|||
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
|
||||
}
|
||||
|
||||
*((float *)payload) = (float)dv;
|
||||
// *((float *)payload) = (float)dv;
|
||||
SET_FLOAT_VAL(payload, dv);
|
||||
}
|
||||
break;
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -497,8 +497,6 @@ int tscProcessSql(SSqlObj *pSql) {
|
|||
return pSql->res.code;
|
||||
}
|
||||
} else if (pCmd->command >= TSDB_SQL_LOCAL) {
|
||||
//pSql->epSet = tscMgmtEpSet;
|
||||
// } else { // local handler
|
||||
return (*tscProcessMsgRsp[pCmd->command])(pSql);
|
||||
}
|
||||
|
||||
|
@ -645,7 +643,6 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
|
|||
}
|
||||
|
||||
pSql->epSet.inUse = rand()%pSql->epSet.numOfEps;
|
||||
|
||||
pQueryMsg->head.vgId = htonl(vgId);
|
||||
|
||||
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
|
||||
|
@ -660,8 +657,6 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
|
|||
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
|
||||
assert(index >= 0 && index < numOfVgroups);
|
||||
|
||||
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, numOfVgroups);
|
||||
|
||||
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index);
|
||||
|
||||
// set the vgroup info
|
||||
|
@ -670,7 +665,10 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
|
|||
|
||||
int32_t numOfTables = (int32_t)taosArrayGetSize(pTableIdList->itemList);
|
||||
pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables
|
||||
|
||||
|
||||
tscDebug("%p query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql,
|
||||
pTableIdList->vgInfo.vgId, numOfTables, index, numOfVgroups);
|
||||
|
||||
// serialize each table id info
|
||||
for(int32_t i = 0; i < numOfTables; ++i) {
|
||||
STableIdInfo* pItem = taosArrayGet(pTableIdList->itemList, i);
|
||||
|
@ -705,7 +703,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
|
||||
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
|
||||
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
|
||||
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
|
||||
tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (uint64_t)numOfSrcCols,
|
||||
tscGetNumOfColumns(pTableMeta));
|
||||
|
||||
|
@ -756,6 +754,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit);
|
||||
pQueryMsg->sqlstrLen = htonl(sqlLen);
|
||||
pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen);
|
||||
pQueryMsg->sw.gap = htobe64(pQueryInfo->sessionWindow.gap);
|
||||
pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
||||
|
||||
size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
|
||||
|
@ -835,13 +835,31 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
|
||||
pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag);
|
||||
|
||||
if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
|
||||
pSqlFuncExpr->colType = htons(pExpr->resType);
|
||||
pSqlFuncExpr->colBytes = htons(pExpr->resBytes);
|
||||
} else if (pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
SSchema *s = tGetTbnameColumnSchema();
|
||||
|
||||
pSqlFuncExpr->colType = htons(s->type);
|
||||
pSqlFuncExpr->colBytes = htons(s->bytes);
|
||||
} else if (pExpr->colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) {
|
||||
SSchema s = tGetBlockDistColumnSchema();
|
||||
|
||||
pSqlFuncExpr->colType = htons(s.type);
|
||||
pSqlFuncExpr->colBytes = htons(s.bytes);
|
||||
} else {
|
||||
SSchema* s = tscGetColumnSchemaById(pTableMeta, pExpr->colInfo.colId);
|
||||
pSqlFuncExpr->colType = htons(s->type);
|
||||
pSqlFuncExpr->colBytes = htons(s->bytes);
|
||||
}
|
||||
|
||||
pSqlFuncExpr->functionId = htons(pExpr->functionId);
|
||||
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
|
||||
pSqlFuncExpr->resColId = htons(pExpr->resColId);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
|
||||
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
|
||||
// todo add log
|
||||
for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
|
||||
pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
|
||||
pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen);
|
||||
|
||||
|
@ -866,6 +884,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
for (int32_t i = 0; i < output; ++i) {
|
||||
SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
|
||||
SSqlExpr *pExpr = pField->pSqlExpr;
|
||||
|
||||
// this should be switched to projection query
|
||||
if (pExpr != NULL) {
|
||||
// the queried table has been removed and a new table with the same name has already been created already
|
||||
// return error msg
|
||||
|
@ -879,33 +899,31 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
pSqlFuncExpr1->colInfo.colId = htons(pExpr->colInfo.colId);
|
||||
pSqlFuncExpr1->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
|
||||
pSqlFuncExpr1->colInfo.flag = htons(pExpr->colInfo.flag);
|
||||
pSqlFuncExpr1->numOfParams = 0; // no params for projection query
|
||||
pSqlFuncExpr1->functionId = htons(TSDB_FUNC_PRJ);
|
||||
pSqlFuncExpr1->colInfo.colId = htons(pExpr->resColId);
|
||||
pSqlFuncExpr1->colInfo.flag = htons(TSDB_COL_NORMAL);
|
||||
|
||||
pSqlFuncExpr1->functionId = htons(pExpr->functionId);
|
||||
pSqlFuncExpr1->numOfParams = htons(pExpr->numOfParams);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
|
||||
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
|
||||
// todo add log
|
||||
pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
|
||||
pSqlFuncExpr1->arg[j].argBytes = htons(pExpr->param[j].nLen);
|
||||
|
||||
if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
|
||||
memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
|
||||
pMsg += pExpr->param[j].nLen;
|
||||
} else {
|
||||
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64);
|
||||
bool assign = false;
|
||||
for (int32_t f = 0; f < tscSqlExprNumOfExprs(pQueryInfo); ++f) {
|
||||
SSqlExpr *pe = tscSqlExprGet(pQueryInfo, f);
|
||||
if (pe == pExpr) {
|
||||
pSqlFuncExpr1->colInfo.colIndex = htons(f);
|
||||
pSqlFuncExpr1->colType = htons(pe->resType);
|
||||
pSqlFuncExpr1->colBytes = htons(pe->resBytes);
|
||||
assign = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(assign);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
|
||||
} else {
|
||||
assert(pField->pArithExprInfo != NULL);
|
||||
SExprInfo* pExprInfo = pField->pArithExprInfo;
|
||||
|
||||
pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
|
||||
pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
|
||||
pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId);
|
||||
pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
|
|
|
@ -503,9 +503,19 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
|
|||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single tabel subscription
|
||||
pQueryInfo->window.skey = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, 0))->key;
|
||||
tscDebug("subscribe:%s set subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
|
||||
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single table subscription
|
||||
|
||||
size_t size = taosArrayGetSize(pSub->progress);
|
||||
TSKEY s = INT64_MAX;
|
||||
for(int32_t i = 0; i < size; ++i) {
|
||||
TSKEY k = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, i))->key;
|
||||
if (s > k) {
|
||||
s = k;
|
||||
}
|
||||
}
|
||||
|
||||
pQueryInfo->window.skey = s;
|
||||
tscDebug("subscribe:%s set next round subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
|
||||
}
|
||||
|
||||
if (pSub->pTimer == NULL) {
|
||||
|
|
|
@ -74,14 +74,14 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
|
|||
SSubqueryState *subState = &pParentSql->subState;
|
||||
|
||||
//lock in caller
|
||||
|
||||
tscDebug("%p total subqueries: %d", pParentSql, subState->numOfSub);
|
||||
for (int i = 0; i < subState->numOfSub; i++) {
|
||||
if (0 == subState->states[i]) {
|
||||
tscDebug("%p subquery:%p,%d is NOT finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
|
||||
tscDebug("%p subquery:%p, index: %d NOT finished, abort query completion check", pParentSql, pParentSql->pSubs[i], i);
|
||||
done = false;
|
||||
break;
|
||||
} else {
|
||||
tscDebug("%p subquery:%p,%d is finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
|
||||
tscDebug("%p subquery:%p, index: %d finished", pParentSql, pParentSql->pSubs[i], i);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -453,7 +453,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
pSubQueryInfo->tsBuf = NULL;
|
||||
|
||||
// free result for async object will also free sqlObj
|
||||
assert(tscSqlExprNumOfExprs(pSubQueryInfo) == 1); // ts_comp query only requires one resutl columns
|
||||
assert(tscSqlExprNumOfExprs(pSubQueryInfo) == 1); // ts_comp query only requires one result columns
|
||||
taos_free_result(pPrevSub);
|
||||
|
||||
SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, TSDB_SQL_SELECT, NULL);
|
||||
|
@ -507,6 +507,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
|
||||
int16_t funcId = pExpr->functionId;
|
||||
|
||||
// add the invisible timestamp column
|
||||
if ((pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) ||
|
||||
(funcId != TSDB_FUNC_TS && funcId != TSDB_FUNC_TS_DUMMY && funcId != TSDB_FUNC_PRJ)) {
|
||||
|
||||
|
@ -847,6 +848,8 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
SSqlRes* pRes = &pSql->res;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
// todo, the type may not include TSDB_QUERY_TYPE_TAG_FILTER_QUERY
|
||||
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
|
||||
|
||||
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1059,7 +1062,6 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
|
|||
tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
|
||||
|
||||
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||
|
||||
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
|
||||
return;
|
||||
}
|
||||
|
@ -1880,6 +1882,13 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S
|
|||
}
|
||||
}
|
||||
|
||||
if (p && taosArrayGetSize(p) > 0) {
|
||||
SResPair *l = taosArrayGetLast(p);
|
||||
if (l->key == key && key == INT64_MIN) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
//append a new column
|
||||
if (p == NULL) {
|
||||
SStddevInterResult t = {.colId = id, .pResult = taosArrayInit(10, sizeof(SResPair)),};
|
||||
|
@ -1941,7 +1950,11 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
|
|||
|
||||
// tag or group by column
|
||||
if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) {
|
||||
memcpy(p + offset, row[i], length[i]);
|
||||
if (row[i] == NULL) {
|
||||
setNull(p + offset, pExpr->resType, pExpr->resBytes);
|
||||
} else {
|
||||
memcpy(p + offset, row[i], length[i]);
|
||||
}
|
||||
offset += pExpr->resBytes;
|
||||
}
|
||||
}
|
||||
|
@ -2639,12 +2652,17 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo
|
|||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
|
||||
|
||||
pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY;
|
||||
|
||||
// clear the limit/offset info, since it should not be sent to vnode to be executed.
|
||||
pQueryInfo->limit.limit = -1;
|
||||
pQueryInfo->limit.offset = 0;
|
||||
|
||||
assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1 && trsupport->subqueryIndex < pSql->subState.numOfSub);
|
||||
|
||||
// launch subquery for each vnode, so the subquery index equals to the vgroupIndex.
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
|
||||
pTableMetaInfo->vgroupIndex = trsupport->subqueryIndex;
|
||||
|
||||
|
||||
pSql->pSubs[trsupport->subqueryIndex] = pNew;
|
||||
}
|
||||
|
||||
|
@ -3098,30 +3116,6 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
|
|||
}
|
||||
}
|
||||
|
||||
static UNUSED_FUNC void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) {
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
if (pRes->tsrow[columnIndex] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
|
||||
if (pRes->buffer[columnIndex] == NULL) {
|
||||
pRes->buffer[columnIndex] = malloc(pField->bytes + TSDB_NCHAR_SIZE);
|
||||
}
|
||||
|
||||
/* string terminated char for binary data*/
|
||||
memset(pRes->buffer[columnIndex], 0, pField->bytes + TSDB_NCHAR_SIZE);
|
||||
|
||||
int32_t length = taosUcs4ToMbs(pRes->tsrow[columnIndex], pRes->length[columnIndex], pRes->buffer[columnIndex]);
|
||||
if ( length >= 0 ) {
|
||||
pRes->tsrow[columnIndex] = (unsigned char*)pRes->buffer[columnIndex];
|
||||
pRes->length[columnIndex] = length;
|
||||
} else {
|
||||
tscError("%p charset:%s to %s. val:%s convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)pRes->tsrow[columnIndex]);
|
||||
pRes->tsrow[columnIndex] = NULL;
|
||||
pRes->length[columnIndex] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId) {
|
||||
SArithmeticSupport *pSupport = (SArithmeticSupport *) param;
|
||||
|
||||
|
|
|
@ -97,6 +97,22 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
|
|||
return true;
|
||||
}
|
||||
|
||||
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo) {
|
||||
int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
int32_t functId = pExpr->functionId;
|
||||
|
||||
// "select count(tbname)" query
|
||||
if (functId == TSDB_FUNC_BLKINFO) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
|
||||
if (pQueryInfo == NULL) {
|
||||
return false;
|
||||
|
@ -223,6 +239,21 @@ bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) {
|
|||
return false;
|
||||
}
|
||||
|
||||
bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
|
||||
|
||||
SSqlGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
|
||||
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
|
||||
SColIndex* pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
|
||||
if (!TSDB_COL_IS_TAG(pIndex->flag) && pIndex->colIndex < numOfCols) { // group by normal columns
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
|
||||
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||
|
@ -1722,10 +1753,15 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
|
|||
pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField));
|
||||
|
||||
assert(pQueryInfo->exprList == NULL);
|
||||
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
|
||||
pQueryInfo->resColumnId= -1000;
|
||||
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
|
||||
pQueryInfo->resColumnId = -1000;
|
||||
pQueryInfo->limit.limit = -1;
|
||||
pQueryInfo->limit.offset = 0;
|
||||
|
||||
pQueryInfo->slimit.limit = -1;
|
||||
pQueryInfo->slimit.offset = 0;
|
||||
}
|
||||
|
||||
int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
|
||||
|
|
|
@ -283,12 +283,37 @@ typedef struct {
|
|||
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
|
||||
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]
|
||||
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
|
||||
#define dataColsTKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, 0))
|
||||
#define dataColsKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, 0))
|
||||
#define dataColsTKeyLast(pCols) \
|
||||
(((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, (pCols)->numOfRows - 1))
|
||||
#define dataColsKeyLast(pCols) \
|
||||
(((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
|
||||
static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsTKeyAt(pCols, 0);
|
||||
} else {
|
||||
return TKEY_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsKeyAt(pCols, 0);
|
||||
} else {
|
||||
return TSDB_DATA_TIMESTAMP_NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TKEY dataColsTKeyLast(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsTKeyAt(pCols, pCols->numOfRows - 1);
|
||||
} else {
|
||||
return TKEY_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsKeyAt(pCols, pCols->numOfRows - 1);
|
||||
} else {
|
||||
return TSDB_DATA_TIMESTAMP_NULL;
|
||||
}
|
||||
}
|
||||
|
||||
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
|
||||
void tdResetDataCols(SDataCols *pCols);
|
||||
|
|
|
@ -33,7 +33,7 @@ typedef struct SDataStatis {
|
|||
|
||||
typedef struct SColumnInfoData {
|
||||
SColumnInfo info;
|
||||
void* pData; // the corresponding block data in memory
|
||||
char* pData; // the corresponding block data in memory
|
||||
} SColumnInfoData;
|
||||
|
||||
typedef struct SResPair {
|
||||
|
|
|
@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
|
|||
|
||||
setuptools.setup(
|
||||
name="taos",
|
||||
version="2.0.6",
|
||||
version="2.0.7",
|
||||
author="Taosdata Inc.",
|
||||
author_email="support@taosdata.com",
|
||||
description="TDengine python client package",
|
||||
|
|
|
@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
|
||||
if num_of_rows > 0:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
else:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
|
||||
|
||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
||||
|
@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
"""
|
||||
if num_of_rows > 0:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
else:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
|
||||
|
||||
def _crow_bigint_unsigned_to_python(
|
||||
|
@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
|
|||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
else:
|
||||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
|
||||
|
||||
|
@ -600,7 +600,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
|
||||
|
@ -608,7 +608,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
|
||||
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
|
||||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
|
|||
|
||||
setuptools.setup(
|
||||
name="taos",
|
||||
version="2.0.5",
|
||||
version="2.0.7",
|
||||
author="Taosdata Inc.",
|
||||
author_email="support@taosdata.com",
|
||||
description="TDengine python client package",
|
||||
|
|
|
@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
|
||||
if num_of_rows > 0:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
else:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
|
||||
|
||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
||||
|
@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
"""
|
||||
if num_of_rows > 0:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
else:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
|
||||
|
||||
def _crow_bigint_unsigned_to_python(
|
||||
|
@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
|
|||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
else:
|
||||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
|
||||
|
||||
|
@ -600,7 +600,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
|
||||
|
@ -608,7 +608,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
|
||||
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
|
||||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
from .cinterface import CTaosInterface
|
||||
from .error import *
|
||||
from .constants import FieldType
|
||||
import threading
|
||||
|
||||
# querySeqNum = 0
|
||||
|
||||
|
@ -38,7 +37,6 @@ class TDengineCursor(object):
|
|||
self._block_iter = 0
|
||||
self._affected_rows = 0
|
||||
self._logfile = ""
|
||||
self._threadId = threading.get_ident()
|
||||
|
||||
if connection is not None:
|
||||
self._connection = connection
|
||||
|
@ -105,12 +103,6 @@ class TDengineCursor(object):
|
|||
def execute(self, operation, params=None):
|
||||
"""Prepare and execute a database operation (query or command).
|
||||
"""
|
||||
# if threading.get_ident() != self._threadId:
|
||||
# info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
|
||||
# raise OperationalError(info)
|
||||
# print(info)
|
||||
# return None
|
||||
|
||||
if not operation:
|
||||
return None
|
||||
|
||||
|
@ -280,12 +272,6 @@ class TDengineCursor(object):
|
|||
def _handle_result(self):
|
||||
"""Handle the return result from query.
|
||||
"""
|
||||
# if threading.get_ident() != self._threadId:
|
||||
# info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
|
||||
# raise OperationalError(info)
|
||||
# print(info)
|
||||
# return None
|
||||
|
||||
self._description = []
|
||||
for ele in self._fields:
|
||||
self._description.append(
|
||||
|
|
|
@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
|
|||
|
||||
setuptools.setup(
|
||||
name="taos",
|
||||
version="2.0.5",
|
||||
version="2.0.7",
|
||||
author="Taosdata Inc.",
|
||||
author_email="support@taosdata.com",
|
||||
description="TDengine python client package",
|
||||
|
|
|
@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
|
||||
if num_of_rows > 0:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
else:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
|
||||
|
||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
||||
|
@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
"""
|
||||
if num_of_rows > 0:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
else:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
|
||||
|
||||
def _crow_bigint_unsigned_to_python(
|
||||
|
@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
|
|||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
else:
|
||||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
|
||||
|
||||
|
@ -600,7 +600,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
|
||||
|
@ -608,7 +608,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
|
||||
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
|
||||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
from .cinterface import CTaosInterface
|
||||
from .error import *
|
||||
from .constants import FieldType
|
||||
import threading
|
||||
|
||||
# querySeqNum = 0
|
||||
|
||||
|
@ -38,7 +37,6 @@ class TDengineCursor(object):
|
|||
self._block_iter = 0
|
||||
self._affected_rows = 0
|
||||
self._logfile = ""
|
||||
self._threadId = threading.get_ident()
|
||||
|
||||
if connection is not None:
|
||||
self._connection = connection
|
||||
|
@ -105,12 +103,6 @@ class TDengineCursor(object):
|
|||
def execute(self, operation, params=None):
|
||||
"""Prepare and execute a database operation (query or command).
|
||||
"""
|
||||
# if threading.get_ident() != self._threadId:
|
||||
# info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
|
||||
# raise OperationalError(info)
|
||||
# print(info)
|
||||
# return None
|
||||
|
||||
if not operation:
|
||||
return None
|
||||
|
||||
|
@ -280,12 +272,6 @@ class TDengineCursor(object):
|
|||
def _handle_result(self):
|
||||
"""Handle the return result from query.
|
||||
"""
|
||||
# if threading.get_ident() != self._threadId:
|
||||
# info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
|
||||
# raise OperationalError(info)
|
||||
# print(info)
|
||||
# return None
|
||||
|
||||
self._description = []
|
||||
for ele in self._fields:
|
||||
self._description.append(
|
||||
|
|
|
@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
|
|||
|
||||
setuptools.setup(
|
||||
name="taos",
|
||||
version="2.0.4",
|
||||
version="2.0.7",
|
||||
author="Taosdata Inc.",
|
||||
author_email="support@taosdata.com",
|
||||
description="TDengine python client package",
|
||||
|
|
|
@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
|
||||
if num_of_rows > 0:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
else:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
|
||||
|
||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
||||
|
@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
"""
|
||||
if num_of_rows > 0:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
else:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
|
||||
|
||||
def _crow_bigint_unsigned_to_python(
|
||||
|
@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
|
|||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
else:
|
||||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
|
||||
|
||||
|
@ -600,7 +600,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
|
||||
|
@ -608,7 +608,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
|
||||
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
|
||||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
from .cinterface import CTaosInterface
|
||||
from .error import *
|
||||
from .constants import FieldType
|
||||
import threading
|
||||
|
||||
# querySeqNum = 0
|
||||
|
||||
|
@ -38,7 +37,6 @@ class TDengineCursor(object):
|
|||
self._block_iter = 0
|
||||
self._affected_rows = 0
|
||||
self._logfile = ""
|
||||
self._threadId = threading.get_ident()
|
||||
|
||||
if connection is not None:
|
||||
self._connection = connection
|
||||
|
|
|
@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
|
|||
|
||||
setuptools.setup(
|
||||
name="taos",
|
||||
version="2.0.4",
|
||||
version="2.0.7",
|
||||
author="Taosdata Inc.",
|
||||
author_email="support@taosdata.com",
|
||||
description="TDengine python client package",
|
||||
|
|
|
@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
|
||||
if num_of_rows > 0:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
else:
|
||||
return list(map(_timestamp_converter, ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
|
||||
|
||||
|
||||
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
|
||||
|
@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
|
|||
"""
|
||||
if num_of_rows > 0:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
else:
|
||||
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
|
||||
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
|
||||
|
||||
|
||||
def _crow_bigint_unsigned_to_python(
|
||||
|
@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
|
|||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
else:
|
||||
return [
|
||||
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
|
||||
data, ctypes.POINTER(
|
||||
ctypes.c_ulong))[
|
||||
ctypes.c_uint64))[
|
||||
:abs(num_of_rows)]]
|
||||
|
||||
|
||||
|
@ -600,7 +600,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
|
||||
|
@ -608,7 +608,7 @@ class CTaosInterface(object):
|
|||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
|
||||
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
|
||||
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
|
||||
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
|
||||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
from .cinterface import CTaosInterface
|
||||
from .error import *
|
||||
from .constants import FieldType
|
||||
import threading
|
||||
|
||||
# querySeqNum = 0
|
||||
|
||||
|
@ -38,7 +37,6 @@ class TDengineCursor(object):
|
|||
self._block_iter = 0
|
||||
self._affected_rows = 0
|
||||
self._logfile = ""
|
||||
self._threadId = threading.get_ident()
|
||||
|
||||
if connection is not None:
|
||||
self._connection = connection
|
||||
|
|
|
@ -244,6 +244,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO TAOS_DEF_ERROR_CODE(0, 0x0612) //"Invalid information to create table")
|
||||
#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk")
|
||||
#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message")
|
||||
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value")
|
||||
|
||||
// query
|
||||
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle")
|
||||
|
@ -258,7 +259,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_QRY_IN_EXEC TAOS_DEF_ERROR_CODE(0, 0x0709) //"Multiple retrieval of this query")
|
||||
#define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query")
|
||||
#define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached")
|
||||
#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistance in replica")
|
||||
#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistency in replica")
|
||||
|
||||
|
||||
// grant
|
||||
|
|
|
@ -397,7 +397,7 @@ typedef struct SColIndex {
|
|||
int16_t colId; // column id
|
||||
int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
|
||||
uint16_t flag; // denote if it is a tag or a normal column
|
||||
char name[TSDB_COL_NAME_LEN];
|
||||
char name[TSDB_COL_NAME_LEN]; // TODO remove it
|
||||
} SColIndex;
|
||||
|
||||
/* sql function msg, to describe the message to vnode about sql function
|
||||
|
@ -405,7 +405,10 @@ typedef struct SColIndex {
|
|||
typedef struct SSqlFuncMsg {
|
||||
int16_t functionId;
|
||||
int16_t numOfParams;
|
||||
|
||||
int16_t resColId; // result column id, id of the current output column
|
||||
int16_t colType;
|
||||
int16_t colBytes;
|
||||
|
||||
SColIndex colInfo;
|
||||
struct ArgElem {
|
||||
|
@ -485,12 +488,13 @@ typedef struct {
|
|||
int16_t orderColId;
|
||||
int16_t numOfCols; // the number of columns will be load from vnode
|
||||
SInterval interval;
|
||||
SSessionWindow sw; // session window
|
||||
uint16_t tagCondLen; // tag length in current query
|
||||
uint32_t tbnameCondLen; // table name filter condition string length
|
||||
int16_t numOfGroupCols; // num of group by columns
|
||||
int16_t orderByIdx;
|
||||
int16_t orderType; // used in group by xx order by xxx
|
||||
int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query.
|
||||
int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query.
|
||||
int16_t prjOrder; // global order in super table projection query.
|
||||
int64_t limit;
|
||||
int64_t offset;
|
||||
|
@ -640,6 +644,7 @@ typedef struct {
|
|||
int32_t maxtablesPerVnode;
|
||||
int32_t maxVgroupsPerDb;
|
||||
char arbitrator[TSDB_EP_LEN]; // tsArbitrator
|
||||
char reserve[2]; // to solve arm32 bus error
|
||||
char timezone[64]; // tsTimezone
|
||||
int64_t checkTime; // 1970-01-01 00:00:00.000
|
||||
char locale[TSDB_LOCALE_LEN]; // tsLocale
|
||||
|
|
|
@ -158,13 +158,18 @@ int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pR
|
|||
|
||||
typedef void *TsdbQueryHandleT; // Use void to hide implementation details
|
||||
|
||||
// query condition to build vnode iterator
|
||||
#define BLOCK_LOAD_OFFSET_SEQ_ORDER 1
|
||||
#define BLOCK_LOAD_TABLE_SEQ_ORDER 2
|
||||
#define BLOCK_LOAD_TABLE_RR_ORDER 3
|
||||
|
||||
// query condition to build multi-table data block iterator
|
||||
typedef struct STsdbQueryCond {
|
||||
STimeWindow twindow;
|
||||
int32_t order; // desc|asc order to iterate the data block
|
||||
int32_t numOfCols;
|
||||
SColumnInfo *colList;
|
||||
bool loadExternalRows; // load external rows or not
|
||||
int32_t type; // data block load type:
|
||||
} STsdbQueryCond;
|
||||
|
||||
typedef struct SMemRef {
|
||||
|
@ -181,17 +186,31 @@ typedef struct SDataBlockInfo {
|
|||
int32_t tid;
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SFileBlockInfo {
|
||||
int32_t numOfRows;
|
||||
} SFileBlockInfo;
|
||||
|
||||
typedef struct {
|
||||
void *pTable;
|
||||
TSKEY lastKey;
|
||||
} STableKeyInfo;
|
||||
|
||||
typedef struct {
|
||||
size_t numOfTables;
|
||||
uint32_t numOfTables;
|
||||
SArray * pGroupList;
|
||||
SHashObj *map; // speedup acquire the tableQueryInfo by table uid
|
||||
} STableGroupInfo;
|
||||
|
||||
typedef struct {
|
||||
uint16_t rowSize;
|
||||
uint16_t numOfFiles;
|
||||
uint32_t numOfTables;
|
||||
uint64_t totalSize;
|
||||
int32_t firstSeekTimeUs;
|
||||
uint32_t numOfRowsInMemTable;
|
||||
SArray *dataBlockInfos;
|
||||
} STableBlockDist;
|
||||
|
||||
/**
|
||||
* Get the data block iterator, starting from position according to the query condition
|
||||
*
|
||||
|
@ -252,16 +271,7 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle);
|
|||
* @param pQueryHandle
|
||||
* @return
|
||||
*/
|
||||
bool tsdbNextDataBlock(TsdbQueryHandleT *pQueryHandle);
|
||||
/**
|
||||
* move to next block if exists but not merge data in memtable
|
||||
*
|
||||
* @param pQueryHandle
|
||||
* @return
|
||||
*/
|
||||
bool tsdbNextDataBlockWithoutMerge(TsdbQueryHandleT *pQueryHandle);
|
||||
|
||||
SArray* tsdbGetExternalRow(TsdbQueryHandleT *pHandle, SMemRef* pMemRef, int16_t type);
|
||||
bool tsdbNextDataBlock(TsdbQueryHandleT pQueryHandle);
|
||||
|
||||
/**
|
||||
* Get current data block information
|
||||
|
@ -306,7 +316,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo *tsdb, uint64_t uid, TSKEY key, const
|
|||
SColIndex *pColIndex, int32_t numOfCols);
|
||||
|
||||
/**
|
||||
* destory the created table group list, which is generated by tag query
|
||||
* destroy the created table group list, which is generated by tag query
|
||||
* @param pGroupList
|
||||
*/
|
||||
void tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
|
||||
|
@ -336,6 +346,12 @@ int32_t tsdbGetTableGroupFromIdList(STsdbRepo *tsdb, SArray *pTableIdList, STabl
|
|||
*/
|
||||
void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle);
|
||||
|
||||
void tsdbResetQueryHandle(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond);
|
||||
|
||||
void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond, STableGroupInfo* groupList);
|
||||
|
||||
int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist* pTableBlockInfo);
|
||||
|
||||
/**
|
||||
* get the statistics of repo usage
|
||||
* @param repo. point to the tsdbrepo
|
||||
|
|
|
@ -138,100 +138,76 @@
|
|||
#define TK_FROM 119
|
||||
#define TK_VARIABLE 120
|
||||
#define TK_INTERVAL 121
|
||||
#define TK_FILL 122
|
||||
#define TK_SLIDING 123
|
||||
#define TK_ORDER 124
|
||||
#define TK_BY 125
|
||||
#define TK_ASC 126
|
||||
#define TK_DESC 127
|
||||
#define TK_GROUP 128
|
||||
#define TK_HAVING 129
|
||||
#define TK_LIMIT 130
|
||||
#define TK_OFFSET 131
|
||||
#define TK_SLIMIT 132
|
||||
#define TK_SOFFSET 133
|
||||
#define TK_WHERE 134
|
||||
#define TK_NOW 135
|
||||
#define TK_RESET 136
|
||||
#define TK_QUERY 137
|
||||
#define TK_ADD 138
|
||||
#define TK_COLUMN 139
|
||||
#define TK_TAG 140
|
||||
#define TK_CHANGE 141
|
||||
#define TK_SET 142
|
||||
#define TK_KILL 143
|
||||
#define TK_CONNECTION 144
|
||||
#define TK_STREAM 145
|
||||
#define TK_COLON 146
|
||||
#define TK_ABORT 147
|
||||
#define TK_AFTER 148
|
||||
#define TK_ATTACH 149
|
||||
#define TK_BEFORE 150
|
||||
#define TK_BEGIN 151
|
||||
#define TK_CASCADE 152
|
||||
#define TK_CLUSTER 153
|
||||
#define TK_CONFLICT 154
|
||||
#define TK_COPY 155
|
||||
#define TK_DEFERRED 156
|
||||
#define TK_DELIMITERS 157
|
||||
#define TK_DETACH 158
|
||||
#define TK_EACH 159
|
||||
#define TK_END 160
|
||||
#define TK_EXPLAIN 161
|
||||
#define TK_FAIL 162
|
||||
#define TK_FOR 163
|
||||
#define TK_IGNORE 164
|
||||
#define TK_IMMEDIATE 165
|
||||
#define TK_INITIALLY 166
|
||||
#define TK_INSTEAD 167
|
||||
#define TK_MATCH 168
|
||||
#define TK_KEY 169
|
||||
#define TK_OF 170
|
||||
#define TK_RAISE 171
|
||||
#define TK_REPLACE 172
|
||||
#define TK_RESTRICT 173
|
||||
#define TK_ROW 174
|
||||
#define TK_STATEMENT 175
|
||||
#define TK_TRIGGER 176
|
||||
#define TK_VIEW 177
|
||||
#define TK_COUNT 178
|
||||
#define TK_SUM 179
|
||||
#define TK_AVG 180
|
||||
#define TK_MIN 181
|
||||
#define TK_MAX 182
|
||||
#define TK_FIRST 183
|
||||
#define TK_LAST 184
|
||||
#define TK_TOP 185
|
||||
#define TK_BOTTOM 186
|
||||
#define TK_STDDEV 187
|
||||
#define TK_PERCENTILE 188
|
||||
#define TK_APERCENTILE 189
|
||||
#define TK_LEASTSQUARES 190
|
||||
#define TK_HISTOGRAM 191
|
||||
#define TK_DIFF 192
|
||||
#define TK_SPREAD 193
|
||||
#define TK_TWA 194
|
||||
#define TK_INTERP 195
|
||||
#define TK_LAST_ROW 196
|
||||
#define TK_RATE 197
|
||||
#define TK_IRATE 198
|
||||
#define TK_SUM_RATE 199
|
||||
#define TK_SUM_IRATE 200
|
||||
#define TK_AVG_RATE 201
|
||||
#define TK_AVG_IRATE 202
|
||||
#define TK_TBID 203
|
||||
#define TK_SEMI 204
|
||||
#define TK_NONE 205
|
||||
#define TK_PREV 206
|
||||
#define TK_LINEAR 207
|
||||
#define TK_IMPORT 208
|
||||
#define TK_METRIC 209
|
||||
#define TK_TBNAME 210
|
||||
#define TK_JOIN 211
|
||||
#define TK_METRICS 212
|
||||
#define TK_INSERT 213
|
||||
#define TK_INTO 214
|
||||
#define TK_VALUES 215
|
||||
#define TK_SESSION 122
|
||||
#define TK_FILL 123
|
||||
#define TK_SLIDING 124
|
||||
#define TK_ORDER 125
|
||||
#define TK_BY 126
|
||||
#define TK_ASC 127
|
||||
#define TK_DESC 128
|
||||
#define TK_GROUP 129
|
||||
#define TK_HAVING 130
|
||||
#define TK_LIMIT 131
|
||||
#define TK_OFFSET 132
|
||||
#define TK_SLIMIT 133
|
||||
#define TK_SOFFSET 134
|
||||
#define TK_WHERE 135
|
||||
#define TK_NOW 136
|
||||
#define TK_RESET 137
|
||||
#define TK_QUERY 138
|
||||
#define TK_ADD 139
|
||||
#define TK_COLUMN 140
|
||||
#define TK_TAG 141
|
||||
#define TK_CHANGE 142
|
||||
#define TK_SET 143
|
||||
#define TK_KILL 144
|
||||
#define TK_CONNECTION 145
|
||||
#define TK_STREAM 146
|
||||
#define TK_COLON 147
|
||||
#define TK_ABORT 148
|
||||
#define TK_AFTER 149
|
||||
#define TK_ATTACH 150
|
||||
#define TK_BEFORE 151
|
||||
#define TK_BEGIN 152
|
||||
#define TK_CASCADE 153
|
||||
#define TK_CLUSTER 154
|
||||
#define TK_CONFLICT 155
|
||||
#define TK_COPY 156
|
||||
#define TK_DEFERRED 157
|
||||
#define TK_DELIMITERS 158
|
||||
#define TK_DETACH 159
|
||||
#define TK_EACH 160
|
||||
#define TK_END 161
|
||||
#define TK_EXPLAIN 162
|
||||
#define TK_FAIL 163
|
||||
#define TK_FOR 164
|
||||
#define TK_IGNORE 165
|
||||
#define TK_IMMEDIATE 166
|
||||
#define TK_INITIALLY 167
|
||||
#define TK_INSTEAD 168
|
||||
#define TK_MATCH 169
|
||||
#define TK_KEY 170
|
||||
#define TK_OF 171
|
||||
#define TK_RAISE 172
|
||||
#define TK_REPLACE 173
|
||||
#define TK_RESTRICT 174
|
||||
#define TK_ROW 175
|
||||
#define TK_STATEMENT 176
|
||||
#define TK_TRIGGER 177
|
||||
#define TK_VIEW 178
|
||||
#define TK_SEMI 179
|
||||
#define TK_NONE 180
|
||||
#define TK_PREV 181
|
||||
#define TK_LINEAR 182
|
||||
#define TK_IMPORT 183
|
||||
#define TK_METRIC 184
|
||||
#define TK_TBNAME 185
|
||||
#define TK_JOIN 186
|
||||
#define TK_METRICS 187
|
||||
#define TK_INSERT 188
|
||||
#define TK_INTO 189
|
||||
#define TK_VALUES 190
@@ -171,10 +171,10 @@ extern tDataTypeDescriptor tDataTypes[15];

bool isValidDataType(int32_t type);

void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void *getNullValue(int32_t type);

void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
@@ -11,6 +11,7 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 100,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
@@ -38,7 +39,9 @@
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"childtable_limit": 33,
"childtable_offset": 33,
"insert_rows": 1000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 100,
File diff suppressed because it is too large
@@ -72,6 +72,11 @@ typedef struct SInterval {
  int64_t offset;
} SInterval;

typedef struct SSessionWindow {
  int64_t gap;             // gap between two session windows (in microseconds)
  int32_t primaryColId;    // primary timestamp column
} SSessionWindow;

int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
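The new SSessionWindow above only carries the gap and the primary timestamp column; the grouping itself happens while rows are scanned in timestamp order, opening a new session whenever two adjacent timestamps are further apart than the gap. A minimal standalone sketch of that idea, not TDengine's implementation (the struct and function names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for SSessionWindow. */
typedef struct {
  int64_t gap;           /* max distance between adjacent timestamps (us) */
  int32_t primaryColId;  /* primary timestamp column id */
} SessionWindowSketch;

/* Assign a session id to every timestamp of an ascending list: a new
 * session starts whenever the distance to the previous timestamp
 * exceeds the configured gap. */
static void assignSessionWindows(const int64_t *ts, int32_t num,
                                 const SessionWindowSketch *sw, int32_t *sessionId) {
  int32_t cur = 0;
  for (int32_t i = 0; i < num; ++i) {
    if (i > 0 && ts[i] - ts[i - 1] > sw->gap) {
      cur += 1;  /* gap exceeded: close the previous session, open a new one */
    }
    sessionId[i] = cur;
  }
}

int main(void) {
  int64_t ts[] = {0, 10, 25, 1000, 1010, 5000};  /* microseconds */
  int32_t sid[6];
  SessionWindowSketch sw = {.gap = 100, .primaryColId = 0};

  assignSessionWindows(ts, 6, &sw, sid);
  for (int32_t i = 0; i < 6; ++i) {
    printf("ts=%lld -> session %d\n", (long long)ts[i], sid[i]);  /* sessions 0,0,0,1,1,2 */
  }
  return 0;
}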
@ -63,9 +63,11 @@ void httpJsonString(JsonBuf* buf, char* sVal, int32_t len);
|
|||
void httpJsonOriginString(JsonBuf* buf, char* sVal, int32_t len);
|
||||
void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int32_t maxLen);
|
||||
void httpJsonInt64(JsonBuf* buf, int64_t num);
|
||||
void httpJsonUInt64(JsonBuf* buf, uint64_t num);
|
||||
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us);
|
||||
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us);
|
||||
void httpJsonInt(JsonBuf* buf, int32_t num);
|
||||
void httpJsonUInt(JsonBuf* buf, uint32_t num);
|
||||
void httpJsonFloat(JsonBuf* buf, float num);
|
||||
void httpJsonDouble(JsonBuf* buf, double num);
|
||||
void httpJsonNull(JsonBuf* buf);
|
||||
|
|
|
@@ -256,6 +256,12 @@ void httpJsonInt64(JsonBuf* buf, int64_t num) {
  buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRId64, num);
}

void httpJsonUInt64(JsonBuf* buf, uint64_t num) {
  httpJsonItemToken(buf);
  httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
  buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRIu64, num);
}

void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) {
  char ts[35] = {0};
  struct tm* ptm;
@@ -303,6 +309,12 @@ void httpJsonInt(JsonBuf* buf, int32_t num) {
  buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%d", num);
}

void httpJsonUInt(JsonBuf* buf, uint32_t num) {
  httpJsonItemToken(buf);
  httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
  buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%u", num);
}

void httpJsonFloat(JsonBuf* buf, float num) {
  httpJsonItemToken(buf);
  httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
@@ -162,6 +162,18 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
      case TSDB_DATA_TYPE_BIGINT:
        httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
        break;
      case TSDB_DATA_TYPE_UTINYINT:
        httpJsonUInt(jsonBuf, *((uint8_t *)row[i]));
        break;
      case TSDB_DATA_TYPE_USMALLINT:
        httpJsonUInt(jsonBuf, *((uint16_t *)row[i]));
        break;
      case TSDB_DATA_TYPE_UINT:
        httpJsonUInt(jsonBuf, *((uint32_t *)row[i]));
        break;
      case TSDB_DATA_TYPE_UBIGINT:
        httpJsonUInt64(jsonBuf, *((uint64_t *)row[i]));
        break;
      case TSDB_DATA_TYPE_FLOAT:
        httpJsonFloat(jsonBuf, GET_FLOAT_VAL(row[i]));
        break;
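The hunk above routes the unsigned column types to the new unsigned JSON writers. A tiny standalone illustration of why the distinction matters: a large unsigned value is misprinted when pushed through the signed formatter (hypothetical example, not project code):

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64_t v = 18446744073709551615ULL;             /* UINT64_MAX */
  printf("as signed:   %" PRId64 "\n", (int64_t)v); /* prints -1 */
  printf("as unsigned: %" PRIu64 "\n", v);          /* prints 18446744073709551615 */
  return 0;
}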
@ -26,6 +26,7 @@ extern "C" {
|
|||
#include "taosdef.h"
|
||||
#include "trpc.h"
|
||||
#include "tvariant.h"
|
||||
#include "tsdb.h"
|
||||
|
||||
#define TSDB_FUNC_INVALID_ID -1
|
||||
#define TSDB_FUNC_COUNT 0
|
||||
|
@ -70,15 +71,17 @@ extern "C" {
|
|||
#define TSDB_FUNC_AVG_IRATE 34
|
||||
|
||||
#define TSDB_FUNC_TID_TAG 35
|
||||
#define TSDB_FUNC_HISTOGRAM 36
|
||||
#define TSDB_FUNC_HLL 37
|
||||
#define TSDB_FUNC_MODE 38
|
||||
#define TSDB_FUNC_SAMPLE 39
|
||||
#define TSDB_FUNC_CEIL 40
|
||||
#define TSDB_FUNC_FLOOR 41
|
||||
#define TSDB_FUNC_ROUND 42
|
||||
#define TSDB_FUNC_MAVG 43
|
||||
#define TSDB_FUNC_CSUM 44
|
||||
#define TSDB_FUNC_BLKINFO 36
|
||||
|
||||
#define TSDB_FUNC_HISTOGRAM 37
|
||||
#define TSDB_FUNC_HLL 38
|
||||
#define TSDB_FUNC_MODE 39
|
||||
#define TSDB_FUNC_SAMPLE 40
|
||||
#define TSDB_FUNC_CEIL 41
|
||||
#define TSDB_FUNC_FLOOR 42
|
||||
#define TSDB_FUNC_ROUND 43
|
||||
#define TSDB_FUNC_MAVG 44
|
||||
#define TSDB_FUNC_CSUM 45
|
||||
|
||||
|
||||
#define TSDB_FUNCSTATE_SO 0x1u // single output
|
||||
|
@@ -214,13 +217,14 @@ typedef struct SAggFunctionInfo {
  void (*xFinalize)(SQLFunctionCtx *pCtx);
  void (*mergeFunc)(SQLFunctionCtx *pCtx);

  int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId);
  int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId);
} SAggFunctionInfo;

#define GET_RES_INFO(ctx) ((ctx)->resultInfo)

int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
                          int16_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable);
int32_t isValidFunction(const char* name, int32_t len);

#define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0)
#define IS_MULTIOUTPUT(x) (((x)&TSDB_FUNCSTATE_MO) != 0)
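dataReqFunc now receives the data block's whole STimeWindow instead of separate start/end keys; each aggregate uses the callback to tell the scanner whether a block's rows, only its pre-computed statistics, or nothing at all must be loaded. A self-contained sketch of that decision for a first-value style function on an ascending scan (the constants and names below are stand-ins, not the engine's BLK_DATA_* values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
  NEED_NOTHING = 0,
  NEED_STATIS  = 1,  /* the real engine can also answer "statistics only" */
  NEED_ALL     = 2
};

typedef struct { int64_t skey; int64_t ekey; } TimeWindowSketch;

/* Once a "first" result earlier than the block's window exists,
 * the whole block can be skipped. */
static int32_t firstValueRequired(bool hasResult, int64_t resultTs, const TimeWindowSketch *w) {
  if (!hasResult) {
    return NEED_ALL;  /* nothing cached yet: load the rows */
  }
  return (resultTs <= w->skey) ? NEED_NOTHING : NEED_ALL;
}

int main(void) {
  TimeWindowSketch blk = {.skey = 1000, .ekey = 2000};
  printf("%d\n", firstValueRequired(false, 0, &blk));    /* 2: must load */
  printf("%d\n", firstValueRequired(true, 500, &blk));   /* 0: skip, result already earlier */
  printf("%d\n", firstValueRequired(true, 1500, &blk));  /* 2: block may hold an earlier row */
  return 0;
}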
@ -242,12 +246,16 @@ typedef struct STwaInfo {
|
|||
STimeWindow win;
|
||||
} STwaInfo;
|
||||
|
||||
struct SBufferWriter;
|
||||
void blockDistInfoToBinary(STableBlockDist* pDist, struct SBufferWriter* bw);
|
||||
void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDist);
|
||||
|
||||
/* global sql function array */
|
||||
extern struct SAggFunctionInfo aAggs[];
|
||||
|
||||
extern int32_t functionCompatList[]; // compatible check array list
|
||||
|
||||
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval);
|
||||
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval);
|
||||
|
||||
/**
|
||||
* the numOfRes should be kept, since it may be used later
|
||||
|
@ -258,14 +266,14 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const cha
|
|||
(_r)->initialized = false; \
|
||||
} while (0)
|
||||
|
||||
static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, uint32_t bufLen) {
|
||||
static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, int32_t bufLen) {
|
||||
pResInfo->initialized = true; // the this struct has been initialized flag
|
||||
|
||||
pResInfo->complete = false;
|
||||
pResInfo->hasResult = false;
|
||||
pResInfo->numOfRes = 0;
|
||||
|
||||
memset(GET_ROWCELL_INTERBUF(pResInfo), 0, (size_t)bufLen);
|
||||
memset(GET_ROWCELL_INTERBUF(pResInfo), 0, bufLen);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -12,8 +12,8 @@
|
|||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TDENGINE_QUERYEXECUTOR_H
|
||||
#define TDENGINE_QUERYEXECUTOR_H
|
||||
#ifndef TDENGINE_QEXECUTOR_H
|
||||
#define TDENGINE_QEXECUTOR_H
|
||||
|
||||
#include "os.h"
|
||||
|
||||
|
@ -37,30 +37,24 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
|
|||
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u)
|
||||
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
|
||||
|
||||
#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
|
||||
#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
|
||||
|
||||
#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
|
||||
|
||||
#define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows)
|
||||
|
||||
enum {
|
||||
// when query starts to execute, this status will set
|
||||
QUERY_NOT_COMPLETED = 0x1u,
|
||||
|
||||
/* result output buffer is full, current query is paused.
|
||||
* this status is only exist in group-by clause and diff/add/division/multiply/ query.
|
||||
*/
|
||||
QUERY_RESBUF_FULL = 0x2u,
|
||||
|
||||
/* query is over
|
||||
* 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc.
|
||||
* 2. when all data within queried time window, it is also denoted as query_completed
|
||||
*/
|
||||
QUERY_COMPLETED = 0x4u,
|
||||
QUERY_COMPLETED = 0x2u,
|
||||
|
||||
/* when the result is not completed return to client, this status will be
|
||||
* usually used in case of interval query with interpolation option
|
||||
*/
|
||||
QUERY_OVER = 0x8u,
|
||||
QUERY_OVER = 0x4u,
|
||||
};
|
||||
|
||||
typedef struct SResultRowPool {
|
||||
|
@ -86,13 +80,13 @@ typedef struct SSqlGroupbyExpr {
|
|||
|
||||
typedef struct SResultRow {
|
||||
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
|
||||
int32_t rowId:29; // row index in buffer page
|
||||
int32_t offset:29; // row index in buffer page
|
||||
bool startInterp; // the time window start timestamp has done the interpolation already.
|
||||
bool endInterp; // the time window end timestamp has done the interpolation already.
|
||||
bool closed; // this result status: closed or opened
|
||||
uint32_t numOfRows; // number of rows of current time window
|
||||
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
|
||||
union {STimeWindow win; char* key;}; // start key of current time window
|
||||
union {STimeWindow win; char* key;}; // start key of current result row
|
||||
} SResultRow;
|
||||
|
||||
typedef struct SGroupResInfo {
|
||||
|
@ -106,12 +100,11 @@ typedef struct SGroupResInfo {
|
|||
* If the number of generated results is greater than this value,
|
||||
* query query will be halt and return results to client immediate.
|
||||
*/
|
||||
typedef struct SResultRec {
|
||||
typedef struct SRspResultInfo {
|
||||
int64_t total; // total generated result size in rows
|
||||
int64_t rows; // current result set size in rows
|
||||
int64_t capacity; // capacity of current result output buffer
|
||||
int32_t capacity; // capacity of current result output buffer
|
||||
int32_t threshold; // result size threshold in rows.
|
||||
} SResultRec;
|
||||
} SRspResultInfo;
|
||||
|
||||
typedef struct SResultRowInfo {
|
||||
SResultRow** pResult; // result list
|
||||
|
@ -138,7 +131,6 @@ typedef struct SSingleColumnFilterInfo {
|
|||
typedef struct STableQueryInfo {
|
||||
TSKEY lastKey;
|
||||
int32_t groupIndex; // group id in table list
|
||||
int16_t queryRangeSet; // denote if the query range is set, only available for interval query
|
||||
tVariant tag;
|
||||
STimeWindow win;
|
||||
STSCursor cur;
|
||||
|
@ -179,82 +171,136 @@ typedef struct {
|
|||
SArray* pResult; // SArray<SStddevInterResult>
|
||||
} SInterResult;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
SDataStatis *pBlockStatis;
|
||||
SArray *pDataBlock;
|
||||
SDataBlockInfo info;
|
||||
} SSDataBlock;
|
||||
|
||||
typedef struct SQuery {
|
||||
SLimitVal limit;
|
||||
|
||||
bool stableQuery; // super table query or not
|
||||
bool topBotQuery; // TODO used bitwise flag
|
||||
bool groupbyColumn; // denote if this is a groupby normal column query
|
||||
bool hasTagResults; // if there are tag values in final result or not
|
||||
bool timeWindowInterpo;// if the time window start/end required interpolation
|
||||
bool queryBlockDist; // if query data block distribution
|
||||
bool stabledev; // super table stddev query
|
||||
int32_t interBufSize; // intermediate buffer sizse
|
||||
|
||||
SOrderVal order;
|
||||
int16_t numOfCols;
|
||||
int16_t numOfTags;
|
||||
SOrderVal order;
|
||||
|
||||
STimeWindow window;
|
||||
SInterval interval;
|
||||
SSessionWindow sw;
|
||||
int16_t precision;
|
||||
int16_t numOfOutput;
|
||||
int16_t fillType;
|
||||
int16_t checkResultBuf; // check if the buffer is full during scan each block
|
||||
SLimitVal limit;
|
||||
|
||||
int32_t srcRowSize; // todo extract struct
|
||||
int32_t resultRowSize;
|
||||
int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query.
|
||||
int32_t maxSrcColumnSize;
|
||||
int32_t tagLen; // tag value length of current query
|
||||
|
||||
SSqlGroupbyExpr* pGroupbyExpr;
|
||||
SExprInfo* pExpr1;
|
||||
SExprInfo* pExpr2;
|
||||
int32_t numOfExpr2;
|
||||
|
||||
SColumnInfo* colList;
|
||||
SColumnInfo* tagColList;
|
||||
int32_t numOfFilterCols;
|
||||
int64_t* fillVal;
|
||||
uint32_t status; // query status
|
||||
SResultRec rec;
|
||||
int32_t pos;
|
||||
tFilePage** sdata;
|
||||
STableQueryInfo* current;
|
||||
int32_t numOfCheckedBlocks; // number of check data blocks
|
||||
|
||||
SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query.
|
||||
SSingleColumnFilterInfo* pFilterInfo;
|
||||
|
||||
STableQueryInfo* current;
|
||||
void* tsdb;
|
||||
SMemRef memRef;
|
||||
STableGroupInfo tableGroupInfo; // table <tid, last_key> list SArray<STableKeyInfo>
|
||||
int32_t vgId;
|
||||
} SQuery;
|
||||
|
||||
typedef SSDataBlock* (*__operator_fn_t)(void* param);
|
||||
typedef void (*__optr_cleanup_fn_t)(void* param, int32_t num);
|
||||
|
||||
struct SOperatorInfo;
|
||||
|
||||
typedef struct SQueryRuntimeEnv {
|
||||
jmp_buf env;
|
||||
SQuery* pQuery;
|
||||
SQLFunctionCtx* pCtx;
|
||||
int32_t numOfRowsPerPage;
|
||||
uint16_t* offset;
|
||||
uint16_t scanFlag; // denotes reversed scan of data or not
|
||||
SFillInfo* pFillInfo;
|
||||
SResultRowInfo resultRowInfo;
|
||||
jmp_buf env;
|
||||
SQuery* pQuery;
|
||||
uint32_t status; // query status
|
||||
void* qinfo;
|
||||
uint8_t scanFlag; // denotes reversed scan of data or not
|
||||
void* pQueryHandle;
|
||||
|
||||
SQueryCostInfo summary;
|
||||
void* pQueryHandle;
|
||||
void* pSecQueryHandle; // another thread for
|
||||
bool stableQuery; // super table query or not
|
||||
bool topBotQuery; // TODO used bitwise flag
|
||||
bool groupbyColumn; // denote if this is a groupby normal column query
|
||||
bool hasTagResults; // if there are tag values in final result or not
|
||||
bool timeWindowInterpo;// if the time window start/end required interpolation
|
||||
bool queryWindowIdentical; // all query time windows are identical for all tables in one group
|
||||
bool queryBlockDist; // if query data block distribution
|
||||
bool stabledev; // super table stddev query
|
||||
int32_t interBufSize; // intermediate buffer sizse
|
||||
int32_t prevGroupId; // previous executed group id
|
||||
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
|
||||
SHashObj* pResultRowHashTable; // quick locate the window object for each result
|
||||
char* keyBuf; // window key buffer
|
||||
SResultRowPool* pool; // window result object pool
|
||||
int32_t prevGroupId; // previous executed group id
|
||||
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
|
||||
SHashObj* pResultRowHashTable; // quick locate the window object for each result
|
||||
char* keyBuf; // window key buffer
|
||||
SResultRowPool* pool; // window result object pool
|
||||
char** prevRow;
|
||||
|
||||
int32_t* rowCellInfoOffset;// offset value for each row result cell info
|
||||
char** prevRow;
|
||||
SArray* prevResult; // intermediate result, SArray<SInterResult>
|
||||
STSBuf* pTsBuf; // timestamp filter list
|
||||
STSCursor cur;
|
||||
|
||||
SArray* prevResult; // intermediate result, SArray<SInterResult>
|
||||
STSBuf* pTsBuf; // timestamp filter list
|
||||
STSCursor cur;
|
||||
char* tagVal; // tag value of current data block
|
||||
SArithmeticSupport *sasArray;
|
||||
|
||||
char* tagVal; // tag value of current data block
|
||||
SArithmeticSupport *sasArray;
|
||||
SSDataBlock *outputBuf;
|
||||
STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray<STableQueryInfo*> structure
|
||||
struct SOperatorInfo *proot;
|
||||
struct SOperatorInfo *pTableScanner; // table scan operator
|
||||
SGroupResInfo groupResInfo;
|
||||
int64_t currentOffset; // dynamic offset value
|
||||
|
||||
SRspResultInfo resultInfo;
|
||||
SHashObj *pTableRetrieveTsMap;
|
||||
} SQueryRuntimeEnv;
|
||||
|
||||
enum {
  OP_IN_EXECUTING  = 1,
  OP_RES_TO_RETURN = 2,
  OP_EXEC_DONE     = 3,
};

enum OPERATOR_TYPE_E {
  OP_TableScan         = 1,
  OP_DataBlocksOptScan = 2,
  OP_TableSeqScan      = 3,
  OP_TagScan           = 4,
  OP_TableBlockInfoScan= 5,
  OP_Aggregate         = 6,
  OP_Arithmetic        = 7,
  OP_Groupby           = 8,
  OP_Limit             = 9,
  OP_Offset            = 10,
  OP_TimeWindow        = 11,
  OP_SessionWindow     = 12,
  OP_Fill              = 13,
  OP_MultiTableAggregate    = 14,
  OP_MultiTableTimeInterval = 15,
};

typedef struct SOperatorInfo {
  uint8_t operatorType;
  bool blockingOptr;      // block operator or not
  uint8_t status;         // denote if current operator is completed
  int32_t numOfOutput;    // number of columns of the current operator results
  char *name;             // name, used to show the query execution plan
  void *info;             // extension attribution
  SExprInfo *pExpr;
  SQueryRuntimeEnv *pRuntimeEnv;

  struct SOperatorInfo *upstream;
  __operator_fn_t exec;
  __optr_cleanup_fn_t cleanup;
} SOperatorInfo;
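The operator ids above replace the old monolithic query loop with a chain of small operators; each SOperatorInfo exposes an exec callback and pulls batches from its upstream operator until it returns NULL. A minimal pull-model sketch of that wiring (the Operator and Batch types here are illustrative, not the engine's structs):

#include <stdint.h>
#include <stdio.h>

typedef struct Batch { int32_t rows; } Batch;

typedef struct Operator {
  const char *name;
  struct Operator *upstream;
  Batch *(*exec)(struct Operator *op);
  void *info;  /* operator-specific state */
} Operator;

/* source: produce three fixed-size batches, then stop */
typedef struct { int32_t produced; } ScanState;
static Batch *scanExec(struct Operator *op) {
  ScanState *s = (ScanState *)op->info;
  static Batch b;
  if (s->produced >= 3) {
    return NULL;
  }
  s->produced++;
  b.rows = 100;
  return &b;
}

/* limit: stop forwarding once the row budget is used up */
typedef struct { int64_t remaining; } LimitState;
static Batch *limitExec(struct Operator *op) {
  LimitState *l = (LimitState *)op->info;
  if (l->remaining <= 0) {
    return NULL;
  }
  Batch *b = op->upstream->exec(op->upstream);
  if (b == NULL) {
    return NULL;
  }
  if (b->rows > l->remaining) {
    b->rows = (int32_t)l->remaining;
  }
  l->remaining -= b->rows;
  return b;
}

int main(void) {
  ScanState ss = {0};
  LimitState ls = {.remaining = 150};
  Operator scan  = {.name = "TableScan", .upstream = NULL, .exec = scanExec, .info = &ss};
  Operator limit = {.name = "Limit", .upstream = &scan, .exec = limitExec, .info = &ls};

  for (Batch *b = limit.exec(&limit); b != NULL; b = limit.exec(&limit)) {
    printf("%s -> %d rows\n", limit.name, b->rows);  /* 100, then 50 */
  }
  return 0;
}

The same shape extends to the other operator types: an aggregate would drain its upstream completely before emitting anything, which is presumably what the blockingOptr flag is about.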
enum {
|
||||
QUERY_RESULT_NOT_READY = 1,
|
||||
QUERY_RESULT_READY = 2,
|
||||
|
@ -263,23 +309,11 @@ enum {
|
|||
typedef struct SQInfo {
|
||||
void* signature;
|
||||
uint64_t qId;
|
||||
int32_t code; // error code to returned to client
|
||||
int64_t owner; // if it is in execution
|
||||
void* tsdb;
|
||||
SMemRef memRef;
|
||||
int32_t vgId;
|
||||
STableGroupInfo tableGroupInfo; // table <tid, last_key> list SArray<STableKeyInfo>
|
||||
STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray<STableQueryInfo*> structure
|
||||
SQueryRuntimeEnv runtimeEnv;
|
||||
SHashObj* arrTableIdInfo;
|
||||
int32_t groupIndex;
|
||||
int32_t code; // error code to returned to client
|
||||
int64_t owner; // if it is in execution
|
||||
|
||||
/*
|
||||
* the query is executed position on which meter of the whole list.
|
||||
* when the index reaches the last one of the list, it means the query is completed.
|
||||
*/
|
||||
int32_t tableIndex;
|
||||
SGroupResInfo groupResInfo;
|
||||
SQueryRuntimeEnv runtimeEnv;
|
||||
SQuery query;
|
||||
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
|
||||
|
||||
pthread_mutex_t lock; // used to synchronize the rsp/query threads
|
||||
|
@ -288,6 +322,7 @@ typedef struct SQInfo {
|
|||
void* rspContext; // response context
|
||||
int64_t startExecTs; // start to exec timestamp
|
||||
char* sql; // query sql string
|
||||
SQueryCostInfo summary;
|
||||
} SQInfo;
|
||||
|
||||
typedef struct SQueryParam {
|
||||
|
@ -306,10 +341,93 @@ typedef struct SQueryParam {
|
|||
SSqlGroupbyExpr *pGroupbyExpr;
|
||||
} SQueryParam;
|
||||
|
||||
typedef struct STableScanInfo {
|
||||
void *pQueryHandle;
|
||||
int32_t numOfBlocks;
|
||||
int32_t numOfSkipped;
|
||||
int32_t numOfBlockStatis;
|
||||
int64_t numOfRows;
|
||||
|
||||
int32_t order; // scan order
|
||||
int32_t times; // repeat counts
|
||||
int32_t current;
|
||||
int32_t reverseTimes; // 0 by default
|
||||
|
||||
SQLFunctionCtx *pCtx; // next operator query context
|
||||
SResultRowInfo *pResultRowInfo;
|
||||
int32_t *rowCellInfoOffset;
|
||||
SExprInfo *pExpr;
|
||||
SSDataBlock block;
|
||||
bool loadExternalRows; // load external rows (prev & next rows)
|
||||
int32_t numOfOutput;
|
||||
int64_t elapsedTime;
|
||||
|
||||
int32_t tableIndex;
|
||||
} STableScanInfo;
|
||||
|
||||
typedef struct STagScanInfo {
|
||||
SColumnInfo* pCols;
|
||||
SSDataBlock* pRes;
|
||||
int32_t totalTables;
|
||||
int32_t currentIndex;
|
||||
} STagScanInfo;
|
||||
|
||||
typedef struct SOptrBasicInfo {
|
||||
SResultRowInfo resultRowInfo;
|
||||
int32_t *rowCellInfoOffset; // offset value for each row result cell info
|
||||
SQLFunctionCtx *pCtx;
|
||||
SSDataBlock *pRes;
|
||||
} SOptrBasicInfo;
|
||||
|
||||
typedef struct SOptrBasicInfo STableIntervalOperatorInfo;
|
||||
|
||||
typedef struct SAggOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
uint32_t seed;
|
||||
} SAggOperatorInfo;
|
||||
|
||||
typedef struct SArithOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
int32_t bufCapacity;
|
||||
uint32_t seed;
|
||||
} SArithOperatorInfo;
|
||||
|
||||
typedef struct SLimitOperatorInfo {
|
||||
int64_t limit;
|
||||
int64_t total;
|
||||
} SLimitOperatorInfo;
|
||||
|
||||
typedef struct SOffsetOperatorInfo {
|
||||
int64_t offset;
|
||||
} SOffsetOperatorInfo;
|
||||
|
||||
typedef struct SFillOperatorInfo {
|
||||
SFillInfo *pFillInfo;
|
||||
SSDataBlock *pRes;
|
||||
int64_t totalInputRows;
|
||||
} SFillOperatorInfo;
|
||||
|
||||
typedef struct SGroupbyOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
int32_t colIndex;
|
||||
char *prevData; // previous group by value
|
||||
} SGroupbyOperatorInfo;
|
||||
|
||||
typedef struct SSWindowOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
STimeWindow curWindow; // current time window
|
||||
TSKEY prevTs; // previous timestamp
|
||||
int32_t numOfRows; // number of rows
|
||||
int32_t start; // start row index
|
||||
} SSWindowOperatorInfo;
|
||||
|
||||
void freeParam(SQueryParam *param);
|
||||
int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param);
|
||||
int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg,
|
||||
SColumnInfo* pTagCols);
|
||||
int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo,
|
||||
SSqlFuncMsg **pExprMsg, SExprInfo *prevExpr);
|
||||
|
||||
SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code);
|
||||
SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
|
||||
SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql, uint64_t *qId);
|
||||
|
@ -319,13 +437,9 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
|
|||
bool isQueryKilled(SQInfo *pQInfo);
|
||||
int32_t checkForQueryBuf(size_t numOfTables);
|
||||
bool doBuildResCheck(SQInfo* pQInfo);
|
||||
void setQueryStatus(SQuery *pQuery, int8_t status);
|
||||
void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status);
|
||||
|
||||
bool onlyQueryTags(SQuery* pQuery);
|
||||
void buildTagQueryResult(SQInfo *pQInfo);
|
||||
void stableQueryImpl(SQInfo *pQInfo);
|
||||
void buildTableBlockDistResult(SQInfo *pQInfo);
|
||||
void tableQueryImpl(SQInfo *pQInfo);
|
||||
bool isValidQInfo(void *param);
|
||||
|
||||
int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);
|
||||
|
@ -337,4 +451,4 @@ void freeQInfo(SQInfo *pQInfo);
|
|||
|
||||
int32_t getMaximumIdleDurationSec();
|
||||
|
||||
#endif // TDENGINE_QUERYEXECUTOR_H
|
||||
#endif // TDENGINE_QEXECUTOR_H
|
||||
|
|
|
@ -24,6 +24,8 @@ extern "C" {
|
|||
#include "qExtbuffer.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
struct SSDataBlock;
|
||||
|
||||
typedef struct {
|
||||
STColumn col; // column info
|
||||
int16_t functionId; // sql function id
|
||||
|
@ -78,7 +80,7 @@ void* taosDestroyFillInfo(SFillInfo *pFillInfo);
|
|||
|
||||
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
|
||||
|
||||
void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput);
|
||||
void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
|
||||
|
||||
void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput);
|
||||
|
||||
|
@@ -88,7 +90,7 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t

int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);

int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity);
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, void** output, int32_t capacity);
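taosGetLinearInterpolationVal produces the FILL(LINEAR) value between the two samples that bracket the missing timestamp. The underlying formula is plain linear interpolation; a small double-only sketch (the helper name is hypothetical, the engine version also handles type conversion):

#include <stdio.h>

/* v = v1 + (v2 - v1) * (k - k1) / (k2 - k1) for k1 < k < k2 */
static double linearInterp(long long k, long long k1, double v1, long long k2, double v2) {
  return v1 + (v2 - v1) * (double)(k - k1) / (double)(k2 - k1);
}

int main(void) {
  /* samples: ts=1000 -> 10.0 and ts=2000 -> 30.0; fill ts=1500 */
  printf("%.1f\n", linearInterp(1500, 1000, 10.0, 2000, 30.0));  /* 20.0 */
  return 0;
}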
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -55,7 +55,6 @@ typedef struct SResultBufStatis {
|
|||
} SResultBufStatis;
|
||||
|
||||
typedef struct SDiskbasedResultBuf {
|
||||
int32_t numOfRowsPerPage;
|
||||
int32_t numOfPages;
|
||||
int64_t totalBufSize;
|
||||
int64_t fileSize; // disk file size
|
||||
|
@ -77,7 +76,7 @@ typedef struct SDiskbasedResultBuf {
|
|||
SResultBufStatis statis;
|
||||
} SDiskbasedResultBuf;
|
||||
|
||||
#define DEFAULT_INTERN_BUF_PAGE_SIZE (256L) // in bytes
|
||||
#define DEFAULT_INTERN_BUF_PAGE_SIZE (1024L) // in bytes
|
||||
#define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1}
|
||||
|
||||
/**
|
||||
|
@ -89,8 +88,7 @@ typedef struct SDiskbasedResultBuf {
|
|||
* @param handle
|
||||
* @return
|
||||
*/
|
||||
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize,
|
||||
int32_t inMemBufSize, const void* handle);
|
||||
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle);
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -101,13 +99,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro
|
|||
*/
|
||||
tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId);
|
||||
|
||||
/**
|
||||
*
|
||||
* @param pResultBuf
|
||||
* @return
|
||||
*/
|
||||
size_t getNumOfRowsPerPage(const SDiskbasedResultBuf* pResultBuf);
|
||||
|
||||
/**
|
||||
*
|
||||
* @param pResultBuf
|
||||
|
|
|
@ -45,7 +45,7 @@ typedef struct SLimitVal {
|
|||
|
||||
typedef struct SOrderVal {
|
||||
uint32_t order;
|
||||
int32_t orderColId;
|
||||
int32_t orderColId;
|
||||
} SOrderVal;
|
||||
|
||||
typedef struct tVariantListItem {
|
||||
|
@ -58,14 +58,19 @@ typedef struct SIntervalVal {
|
|||
SStrToken offset;
|
||||
} SIntervalVal;
|
||||
|
||||
typedef struct SSessionWindowVal {
|
||||
SStrToken col;
|
||||
SStrToken gap;
|
||||
} SSessionWindowVal;
|
||||
|
||||
typedef struct SQuerySQL {
|
||||
struct tSQLExprList *pSelection; // select clause
|
||||
SArray * from; // from clause SArray<tVariantListItem>
|
||||
struct tSQLExpr * pWhere; // where clause [optional]
|
||||
SArray * pGroupby; // groupby clause, only for tags[optional], SArray<tVariantListItem>
|
||||
SArray * pSortOrder; // orderby [optional], SArray<tVariantListItem>
|
||||
SStrToken interval; // interval [optional]
|
||||
SStrToken offset; // offset window [optional]
|
||||
SIntervalVal interval; // (interval, interval_offset) [optional]
|
||||
SSessionWindowVal sessionVal; // session window [optional]
|
||||
SStrToken sliding; // sliding window [optional]
|
||||
SLimitVal limit; // limit offset [optional]
|
||||
SLimitVal slimit; // group limit offset [optional]
|
||||
|
@ -193,19 +198,32 @@ typedef struct SSqlInfo {
|
|||
};
|
||||
} SSqlInfo;
|
||||
|
||||
#define NON_ARITHMEIC_EXPR 0
|
||||
#define NORMAL_ARITHMETIC 1
|
||||
#define AGG_ARIGHTMEIC 2
|
||||
|
||||
enum SQL_NODE_TYPE {
|
||||
SQL_NODE_TABLE_COLUMN= 1,
|
||||
SQL_NODE_SQLFUNCTION = 2,
|
||||
SQL_NODE_VALUE = 3,
|
||||
SQL_NODE_EXPR = 4,
|
||||
};
|
||||
|
||||
typedef struct tSQLExpr {
|
||||
uint32_t nSQLOptr; // TK_FUNCTION: sql function, TK_LE: less than(binary expr)
|
||||
|
||||
// the full sql string of function(col, param), which is actually the raw
|
||||
// field name, since the function name is kept in nSQLOptr already
|
||||
uint16_t type; // sql node type
|
||||
uint32_t tokenId; // TK_FUNCTION: sql function, TK_LE: less than(binary expr)
|
||||
|
||||
// the whole string of the function(col, param), while the function name is kept in token
|
||||
SStrToken operand;
|
||||
SStrToken colInfo; // field id
|
||||
tVariant val; // value only for string, float, int
|
||||
uint32_t functionId; // function id
|
||||
|
||||
SStrToken colInfo; // table column info
|
||||
tVariant value; // the use input value
|
||||
SStrToken token; // original sql expr string
|
||||
|
||||
struct tSQLExpr *pLeft; // left child
|
||||
struct tSQLExpr *pRight; // right child
|
||||
struct tSQLExprList *pParam; // function parameters
|
||||
struct tSQLExprList *pParam; // function parameters list
|
||||
} tSQLExpr;
|
||||
|
||||
// used in select clause. select <tSQLExprList> from xxx
|
||||
|
@ -251,8 +269,8 @@ tSQLExprList *tSqlExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken
|
|||
|
||||
void tSqlExprListDestroy(tSQLExprList *pList);
|
||||
|
||||
SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
|
||||
SQuerySQL *tSetQuerySqlNode(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession,
|
||||
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit);
|
||||
|
||||
SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSelect, int32_t type);
|
||||
|
@ -302,16 +320,6 @@ void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type);
|
|||
|
||||
void *ParseAlloc(void *(*mallocProc)(size_t));
|
||||
|
||||
enum {
|
||||
TSQL_NODE_TYPE_EXPR = 0x1,
|
||||
TSQL_NODE_TYPE_ID = 0x2,
|
||||
TSQL_NODE_TYPE_VALUE = 0x4,
|
||||
};
|
||||
|
||||
#define NON_ARITHMEIC_EXPR 0
|
||||
#define NORMAL_ARITHMETIC 1
|
||||
#define AGG_ARIGHTMEIC 2
|
||||
|
||||
SSqlInfo qSQLParse(const char *str);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -112,13 +112,11 @@ STSBuf* tsBufClone(STSBuf* pTSBuf);
|
|||
|
||||
STSGroupBlockInfo* tsBufGetGroupBlockInfo(STSBuf* pTSBuf, int32_t id);
|
||||
|
||||
void tsBufFlush(STSBuf* pTSBuf);
|
||||
|
||||
void tsBufFlush(STSBuf* pTSBuf);
|
||||
void tsBufResetPos(STSBuf* pTSBuf);
|
||||
STSElem tsBufGetElem(STSBuf* pTSBuf);
|
||||
|
||||
bool tsBufNextPos(STSBuf* pTSBuf);
|
||||
|
||||
STSElem tsBufGetElem(STSBuf* pTSBuf);
|
||||
STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t id, tVariant* tag);
|
||||
|
||||
STSCursor tsBufGetCursor(STSBuf* pTSBuf);
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
|
||||
|
||||
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
|
||||
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pExpr1[1].base.arg->argValue.i64:1)
|
||||
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.arg->argValue.i64:1)
|
||||
|
||||
int32_t getOutputInterResultBufSize(SQuery* pQuery);
|
||||
|
||||
|
@ -44,22 +44,18 @@ void closeResultRow(SResultRowInfo* pResultRowInfo, int32_t slot);
|
|||
bool isResultRowClosed(SResultRowInfo *pResultRowInfo, int32_t slot);
|
||||
void clearResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResultRow, int16_t type);
|
||||
|
||||
SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index);
|
||||
SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t* offset);
|
||||
|
||||
static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) {
|
||||
assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
|
||||
return pResultRowInfo->pResult[slot];
|
||||
}
|
||||
|
||||
static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SResultRow *pResult,
|
||||
tFilePage* page) {
|
||||
assert(pResult != NULL && pRuntimeEnv != NULL);
|
||||
static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) {
|
||||
assert(rowOffset >= 0 && pQuery != NULL);
|
||||
|
||||
SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
|
||||
int32_t realRowId = (int32_t)(pResult->rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
|
||||
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
|
||||
pQuery->pExpr1[columnIndex].bytes * realRowId;
|
||||
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);
|
||||
return ((char *)page->data) + rowOffset + offset * numOfRows;
|
||||
}
|
||||
|
||||
bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type);
|
||||
|
@ -74,8 +70,6 @@ void* destroyResultRowPool(SResultRowPool* p);
|
|||
int32_t getNumOfAllocatedResultRows(SResultRowPool* p);
|
||||
int32_t getNumOfUsedResultRows(SResultRowPool* p);
|
||||
|
||||
bool isPointInterpoQuery(SQuery *pQuery);
|
||||
|
||||
typedef struct {
|
||||
SArray* pResult; // SArray<SResPair>
|
||||
int32_t colId;
|
||||
|
@ -85,12 +79,14 @@ void interResToBinary(SBufferWriter* bw, SArray* pRes, int32_t tagLen);
|
|||
SArray* interResFromBinary(const char* data, int32_t len);
|
||||
void freeInterResult(void* param);
|
||||
|
||||
void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset);
|
||||
void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo);
|
||||
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
|
||||
bool hasRemainDataInCurrentGroup(SGroupResInfo* pGroupResInfo);
|
||||
bool hasRemainData(SGroupResInfo* pGroupResInfo);
|
||||
|
||||
bool incNextGroup(SGroupResInfo* pGroupResInfo);
|
||||
int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);
|
||||
|
||||
int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo);
|
||||
int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t* offset);
|
||||
|
||||
#endif // TDENGINE_QUERYUTIL_H
|
||||
|
|
|
@ -329,8 +329,8 @@ signed(A) ::= PLUS INTEGER(X). { A = strtol(X.z, NULL, 10); }
|
|||
signed(A) ::= MINUS INTEGER(X). { A = -strtol(X.z, NULL, 10);}
|
||||
|
||||
////////////////////////////////// The CREATE TABLE statement ///////////////////////////////
|
||||
cmd ::= CREATE TABLE create_table_args. {}
|
||||
cmd ::= CREATE TABLE create_stable_args. {}
|
||||
cmd ::= CREATE TABLE create_table_args. {}
|
||||
cmd ::= CREATE TABLE create_stable_args. {}
|
||||
cmd ::= CREATE STABLE create_stable_args. {}
|
||||
cmd ::= CREATE TABLE create_table_list(Z). { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = Z;}
|
||||
|
||||
|
@ -455,8 +455,8 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
|
|||
//////////////////////// The SELECT statement /////////////////////////////////
|
||||
%type select {SQuerySQL*}
|
||||
%destructor select {doDestroyQuerySql($$);}
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
||||
A = tSetQuerySqlElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G);
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
||||
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G);
|
||||
}
|
||||
|
||||
%type union {SSubclauseInfo*}
|
||||
|
@ -474,7 +474,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
|
|||
// select server_version(), select client_version(),
|
||||
// select server_state();
|
||||
select(A) ::= SELECT(T) selcollist(W). {
|
||||
A = tSetQuerySqlElems(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
// selcollist is a list of expressions that are to become the return
|
||||
|
@ -549,13 +549,21 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
|
|||
tmvar(A) ::= VARIABLE(X). {A = X;}
|
||||
|
||||
%type interval_opt {SIntervalVal}
|
||||
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.offset.z = NULL; N.offset.type = 0;}
|
||||
interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;}
|
||||
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0;}
|
||||
interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(X) RP. {N.interval = E; N.offset = X;}
|
||||
interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}
|
||||
|
||||
%type session_option {SSessionWindowVal}
|
||||
session_option(X) ::= . {X.col.n = 0; X.gap.n = 0;}
|
||||
session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
|
||||
V.n += Z.n;
|
||||
X.col = V;
|
||||
X.gap = Y;
|
||||
}
|
||||
|
||||
%type fill_opt {SArray*}
|
||||
%destructor fill_opt {taosArrayDestroy($$);}
|
||||
fill_opt(N) ::= . {N = 0; }
|
||||
fill_opt(N) ::= . { N = 0; }
|
||||
fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. {
|
||||
tVariant A = {0};
|
||||
toTSDBType(Y.type);
|
||||
|
@ -837,6 +845,5 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); s
|
|||
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
|
||||
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
|
||||
LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
|
||||
COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF
|
||||
SPREAD TWA INTERP LAST_ROW RATE IRATE SUM_RATE SUM_IRATE AVG_RATE AVG_IRATE TBID NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT
|
||||
NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT
|
||||
METRIC TBNAME JOIN METRICS STABLE NULL INSERT INTO VALUES.
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "taosmsg.h"
|
||||
#include "texpr.h"
|
||||
#include "ttype.h"
|
||||
#include "tsdb.h"
|
||||
|
||||
#include "qAggMain.h"
|
||||
#include "qFill.h"
|
||||
|
@ -26,11 +27,9 @@
|
|||
#include "qTsbuf.h"
|
||||
#include "queryLog.h"
|
||||
|
||||
//#define GET_INPUT_DATA_LIST(x) (((char *)((x)->pInput)) + ((x)->startOffset) * ((x)->inputBytes))
|
||||
#define GET_INPUT_DATA_LIST(x) ((char *)((x)->pInput))
|
||||
#define GET_INPUT_DATA(x, y) (GET_INPUT_DATA_LIST(x) + (y) * (x)->inputBytes)
|
||||
|
||||
//#define GET_TS_LIST(x) ((TSKEY*)&((x)->ptsList[(x)->startOffset]))
|
||||
#define GET_TS_LIST(x) ((TSKEY*)((x)->ptsList))
|
||||
#define GET_TS_DATA(x, y) (GET_TS_LIST(x)[(y)])
|
||||
|
||||
|
@ -191,6 +190,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
|
|||
*bytes = (int16_t)(dataBytes + sizeof(int16_t) + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t) + VARSTR_HEADER_SIZE);
|
||||
*interBytes = 0;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (functionId == TSDB_FUNC_BLKINFO) {
|
||||
*type = TSDB_DATA_TYPE_BINARY;
|
||||
*bytes = 16384;
|
||||
*interBytes = 0;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (functionId == TSDB_FUNC_COUNT) {
|
||||
|
@ -209,7 +213,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
|
|||
|
||||
if (functionId == TSDB_FUNC_TS_COMP) {
|
||||
*type = TSDB_DATA_TYPE_BINARY;
|
||||
*bytes = sizeof(int32_t); // this results is compressed ts data
|
||||
*bytes = 1; // this results is compressed ts data, only one byte
|
||||
*interBytes = POINTER_BYTES;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@@ -355,6 +359,22 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
  return TSDB_CODE_SUCCESS;
}

// TODO use hash table
int32_t isValidFunction(const char* name, int32_t len) {
  for(int32_t i = 0; i <= TSDB_FUNC_BLKINFO; ++i) {
    int32_t nameLen = (int32_t) strlen(aAggs[i].name);
    if (len != nameLen) {
      continue;
    }

    if (strncasecmp(aAggs[i].name, name, len) == 0) {
      return i;
    }
  }

  return -1;
}
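isValidFunction does a case-insensitive linear scan over aAggs (the TODO notes a hash table would do); the length check in front of strncasecmp is what keeps a short name from matching the prefix of a longer one. A standalone illustration with a made-up three-entry registry, not the real aAggs table:

#include <stdio.h>
#include <string.h>
#include <strings.h>

static const char *kNames[] = {"count", "sum_rate", "sum"};

/* Look a name up with or without the length guard. */
static int lookup(const char *name, int len, int withLenCheck) {
  for (int i = 0; i < 3; ++i) {
    int nameLen = (int)strlen(kNames[i]);
    if (withLenCheck && len != nameLen) {
      continue;
    }
    if (strncasecmp(kNames[i], name, (size_t)len) == 0) {
      return i;
    }
  }
  return -1;
}

int main(void) {
  printf("without length check: %d\n", lookup("SUM", 3, 0)); /* 1: wrong, prefix-matches sum_rate */
  printf("with length check:    %d\n", lookup("SUM", 3, 1)); /* 2: correct */
  return 0;
}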
// set the query flag to denote that query is completed
|
||||
static void no_next_step(SQLFunctionCtx *pCtx) {
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
|
@ -458,7 +478,7 @@ static void count_func_merge(SQLFunctionCtx *pCtx) {
|
|||
* @param filterCols
|
||||
* @return
|
||||
*/
|
||||
int32_t count_load_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
int32_t countRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
return BLK_DATA_NO_NEEDED;
|
||||
} else {
|
||||
|
@ -466,7 +486,7 @@ int32_t count_load_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32
|
|||
}
|
||||
}
|
||||
|
||||
int32_t no_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
return BLK_DATA_NO_NEEDED;
|
||||
}
|
||||
|
||||
|
@ -674,16 +694,16 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t statisRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
static int32_t statisRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
return BLK_DATA_STATIS_NEEDED;
|
||||
}
|
||||
|
||||
static int32_t dataBlockRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
static int32_t dataBlockRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
return BLK_DATA_ALL_NEEDED;
|
||||
}
|
||||
|
||||
// todo: if column in current data block are null, opt for this case
|
||||
static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
// todo: if column in current data block are null, opt for this case
|
||||
static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
if (pCtx->order == TSDB_ORDER_DESC) {
|
||||
return BLK_DATA_NO_NEEDED;
|
||||
}
|
||||
|
@ -696,7 +716,7 @@ static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, i
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
if (pCtx->order != pCtx->param[0].i64) {
|
||||
return BLK_DATA_NO_NEEDED;
|
||||
}
|
||||
|
@ -708,7 +728,7 @@ static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, in
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
if (pCtx->order == TSDB_ORDER_DESC) {
|
||||
return BLK_DATA_NO_NEEDED;
|
||||
}
|
||||
|
@ -724,11 +744,11 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en
|
|||
if (pInfo->hasResult != DATA_SET_FLAG) {
|
||||
return BLK_DATA_ALL_NEEDED;
|
||||
} else { // data in current block is not earlier than current result
|
||||
return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
||||
return (pInfo->ts <= w->skey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
|
||||
if (pCtx->order != pCtx->param[0].i64) {
|
||||
return BLK_DATA_NO_NEEDED;
|
||||
}
|
||||
|
@ -744,7 +764,7 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end
|
|||
if (pInfo->hasResult != DATA_SET_FLAG) {
|
||||
return BLK_DATA_ALL_NEEDED;
|
||||
} else {
|
||||
return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
||||
return (pInfo->ts > w->ekey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1359,7 +1379,21 @@ static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
}
|
||||
|
||||
static void stddev_function(SQLFunctionCtx *pCtx) {
|
||||
SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) {
|
||||
pStd->stage++;
|
||||
avg_finalizer(pCtx);
|
||||
|
||||
pResInfo->initialized = true; // set it initialized to avoid re-initialization
|
||||
|
||||
// save average value into tmpBuf, for second stage scan
|
||||
SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput);
|
||||
assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum));
|
||||
}
|
||||
|
||||
if (pStd->stage == 0) {
|
||||
// the first stage is to calculate average value
|
||||
|
@ -1432,7 +1466,20 @@ static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
// the second stage to calculate standard deviation
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
|
||||
if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) {
|
||||
pStd->stage++;
|
||||
avg_finalizer(pCtx);
|
||||
|
||||
pResInfo->initialized = true; // set it initialized to avoid re-initialization
|
||||
|
||||
// save average value into tmpBuf, for second stage scan
|
||||
SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput);
|
||||
assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum));
|
||||
}
|
||||
|
||||
/* the first stage is to calculate average value */
|
||||
if (pStd->stage == 0) {
|
||||
avg_function_f(pCtx, index);
|
||||
|
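Both stddev hunks above finalize the stage-0 average as soon as the second scan (REPEAT_SCAN) begins, so the stage-1 pass can accumulate squared deviations against it. The same two-pass split in a plain standalone sketch, independent of the engine's function contexts:

#include <math.h>
#include <stdio.h>

/* Pass 1: average. Pass 2: sum of squared deviations. */
static double twoPassStddev(const double *x, int n) {
  double sum = 0.0;
  for (int i = 0; i < n; ++i) {
    sum += x[i];
  }
  double avg = sum / n;

  double dev = 0.0;
  for (int i = 0; i < n; ++i) {
    double d = x[i] - avg;
    dev += d * d;
  }
  return sqrt(dev / n);  /* population standard deviation */
}

int main(void) {
  double v[] = {1.0, 2.0, 3.0, 4.0};
  printf("%.4f\n", twoPassStddev(v, 4));  /* 1.1180 */
  return 0;
}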
@ -1574,7 +1621,7 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) {
|
|||
if (p == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
avg = p->avg;
|
||||
}
|
||||
|
||||
|
@ -1776,7 +1823,7 @@ static bool first_last_function_setup(SQLFunctionCtx *pCtx) {
|
|||
|
||||
// todo opt for null block
|
||||
static void first_function(SQLFunctionCtx *pCtx) {
|
||||
if (pCtx->order == TSDB_ORDER_DESC || pCtx->preAggVals.dataBlockLoaded == false) {
|
||||
if (pCtx->order == TSDB_ORDER_DESC /*|| pCtx->preAggVals.dataBlockLoaded == false*/) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1850,7 +1897,7 @@ static void first_dist_function(SQLFunctionCtx *pCtx) {
|
|||
* 1. data block that are not loaded
|
||||
* 2. scan data files in desc order
|
||||
*/
|
||||
if (pCtx->order == TSDB_ORDER_DESC || pCtx->preAggVals.dataBlockLoaded == false) {
|
||||
if (pCtx->order == TSDB_ORDER_DESC/* || pCtx->preAggVals.dataBlockLoaded == false*/) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1921,7 +1968,7 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) {
|
|||
* least one data in this block that is not null.(TODO opt for this case)
|
||||
*/
|
||||
static void last_function(SQLFunctionCtx *pCtx) {
|
||||
if (pCtx->order != pCtx->param[0].i64 || pCtx->preAggVals.dataBlockLoaded == false) {
|
||||
if (pCtx->order != pCtx->param[0].i64/* || pCtx->preAggVals.dataBlockLoaded == false*/) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1934,6 +1981,7 @@ static void last_function(SQLFunctionCtx *pCtx) {
|
|||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(pCtx->pOutput, data, pCtx->inputBytes);
|
||||
|
||||
TSKEY ts = GET_TS_DATA(pCtx, i);
|
||||
|
@ -2013,13 +2061,7 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
|
|||
return;
|
||||
}
|
||||
|
||||
// data block is discard, not loaded, do not need to check it
|
||||
if (!pCtx->preAggVals.dataBlockLoaded) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t notNullElems = 0;
|
||||
|
||||
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
|
||||
char *data = GET_INPUT_DATA(pCtx, i);
|
||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||
|
@ -2125,12 +2167,7 @@ static void last_row_finalizer(SQLFunctionCtx *pCtx) {
|
|||
// do nothing at the first stage
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
if (pResInfo->hasResult != DATA_SET_FLAG) {
|
||||
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull(pCtx->pOutput, pCtx->outputType);
|
||||
} else {
|
||||
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
|
||||
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2445,7 +2482,7 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
}
|
||||
|
||||
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval) {
|
||||
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) {
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
if (pResInfo == NULL) {
|
||||
return true;
|
||||
|
@ -2460,7 +2497,7 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const cha
|
|||
|
||||
tValuePair **pRes = (tValuePair**) pTopBotInfo->res;
|
||||
|
||||
if (functionId == TSDB_FUNC_TOP) {
|
||||
if (pCtx->functionId == TSDB_FUNC_TOP) {
|
||||
switch (pCtx->inputType) {
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
return GET_INT8_VAL(maxval) > pRes[0]->v.i64;
|
||||
|
@ -2531,9 +2568,13 @@ static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) {
|
|||
|
||||
static void top_function(SQLFunctionCtx *pCtx) {
|
||||
int32_t notNullElems = 0;
|
||||
|
||||
|
||||
STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
|
||||
assert(pRes->num >= 0);
|
||||
|
||||
if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
|
||||
buildTopBotStruct(pRes, pCtx);
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||
char *data = GET_INPUT_DATA(pCtx, i);
|
||||
|
@ -2609,13 +2650,13 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
|
|||
if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
|
||||
buildTopBotStruct(pRes, pCtx);
|
||||
}
|
||||
|
||||
|
||||
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||
char *data = GET_INPUT_DATA(pCtx, i);
|
||||
TSKEY ts = GET_TS_DATA(pCtx, i);
|
||||
|
||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||
continue;
|
||||
continue;
|
||||
}
|
||||
|
||||
notNullElems++;
|
||||
|
@ -2648,7 +2689,7 @@ static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
|
||||
buildTopBotStruct(pRes, pCtx);
|
||||
}
|
||||
|
||||
|
||||
SET_VAL(pCtx, 1, 1);
|
||||
do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
|
||||
|
||||
|
@ -2729,6 +2770,17 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
|
|||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
|
||||
// all data are null, set it completed
|
||||
if (pInfo->numOfElems == 0) {
|
||||
pResInfo->complete = true;
|
||||
} else {
|
||||
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
||||
}
|
||||
|
||||
pInfo->stage += 1;
|
||||
}
|
||||
|
||||
// the first stage, only acquire the min/max value
|
||||
if (pInfo->stage == 0) {
|
||||
if (pCtx->preAggVals.isSet) {
|
||||
|
@ -2802,10 +2854,20 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
}
|
||||
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
|
||||
SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
|
||||
if (pInfo->stage == 0) {
|
||||
|
||||
if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
|
||||
// all data are null, set it completed
|
||||
if (pInfo->numOfElems == 0) {
|
||||
pResInfo->complete = true;
|
||||
} else {
|
||||
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
||||
}
|
||||
|
||||
pInfo->stage += 1;
|
||||
}
|
||||
|
||||
if (pInfo->stage == 0) {
|
||||
double v = 0;
|
||||
GET_TYPED_DATA(v, double, pCtx->inputType, pData);
|
||||
|
||||
|
@ -3240,8 +3302,6 @@ static void col_project_function(SQLFunctionCtx *pCtx) {
|
|||
pCtx->inputBytes);
|
||||
}
|
||||
}
|
||||
|
||||
pCtx->pOutput += pCtx->size * pCtx->outputBytes;
|
||||
}
|
||||
|
||||
static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||
|
@@ -3547,9 +3607,6 @@ static void diff_function(SQLFunctionCtx *pCtx) {
    int32_t forwardStep = (isFirstBlock) ? notNullElems - 1 : notNullElems;

    GET_RES_INFO(pCtx)->numOfRes += forwardStep;

    pCtx->pOutput += forwardStep * pCtx->outputBytes;
    pCtx->ptsOutputBuf = (char*)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE;
  }
}
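In diff_function above the first block contributes notNullElems - 1 rows (forwardStep), because a difference needs a predecessor row. A tiny standalone sketch of that behaviour (hypothetical helper, not the engine's implementation):

#include <stdint.h>
#include <stdio.h>

/* Successive differences: out[k] = in[k+1] - in[k]; n inputs yield n - 1 results. */
static int32_t computeDiff(const int64_t *in, int32_t n, int64_t *out) {
  for (int32_t i = 1; i < n; ++i) {
    out[i - 1] = in[i] - in[i - 1];
  }
  return n - 1;
}

int main(void) {
  int64_t in[] = {10, 13, 13, 20};
  int64_t out[3];
  int32_t rows = computeDiff(in, 4, out);
  for (int32_t i = 0; i < rows; ++i) {
    printf("%lld\n", (long long)out[i]);  /* 3, 0, 7 */
  }
  return 0;
}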
@ -3631,7 +3688,7 @@ char *getArithColumnData(void *param, const char* name, int32_t colId) {
|
|||
}
|
||||
}
|
||||
|
||||
assert(index >= 0 && colId >= 0);
|
||||
assert(index >= 0 /*&& colId >= 0*/);
|
||||
return pSupport->data[index] + pSupport->offset * pSupport->colList[index].bytes;
|
||||
}
|
||||
|
||||
|
@ -3640,7 +3697,6 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
|
|||
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
|
||||
|
||||
arithmeticTreeTraverse(sas->pArithExpr->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
|
||||
pCtx->pOutput += pCtx->outputBytes * pCtx->size;
|
||||
}
|
||||
|
||||
static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||
|
@ -4171,50 +4227,88 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
|
||||
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
*(TSKEY *) pCtx->pOutput = pCtx->startTs;
|
||||
*(TSKEY *)pCtx->pOutput = pCtx->startTs;
|
||||
} else if (type == TSDB_FILL_NULL) {
|
||||
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
|
||||
} else if (type == TSDB_FILL_SET_VALUE) {
|
||||
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
|
||||
} else {
|
||||
if (pCtx->start.key == INT64_MIN) {
|
||||
assert(pCtx->end.key == INT64_MIN);
|
||||
return;
|
||||
}
|
||||
|
||||
if (type == TSDB_FILL_NULL) {
|
||||
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
|
||||
} else if (type == TSDB_FILL_SET_VALUE) {
|
||||
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
|
||||
} else if (type == TSDB_FILL_PREV) {
|
||||
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
|
||||
} else {
|
||||
assignVal(pCtx->pOutput, pCtx->start.ptr, pCtx->outputBytes, pCtx->inputType);
|
||||
}
|
||||
} else if (type == TSDB_FILL_NEXT) {
|
||||
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->end.val);
|
||||
} else {
|
||||
assignVal(pCtx->pOutput, pCtx->end.ptr, pCtx->outputBytes, pCtx->inputType);
|
||||
}
|
||||
} else if (type == TSDB_FILL_LINEAR) {
|
||||
SPoint point1 = {.key = pCtx->start.key, .val = &pCtx->start.val};
|
||||
SPoint point2 = {.key = pCtx->end.key, .val = &pCtx->end.val};
|
||||
SPoint point = {.key = pCtx->startTs, .val = pCtx->pOutput};
|
||||
|
||||
int32_t srcType = pCtx->inputType;
|
||||
if (IS_NUMERIC_TYPE(srcType)) { // TODO should find the not null data?
|
||||
if (isNull((char *)&pCtx->start.val, srcType) || isNull((char *)&pCtx->end.val, srcType)) {
|
||||
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
|
||||
if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) {
|
||||
if (type == TSDB_FILL_PREV) {
|
||||
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
|
||||
} else {
|
||||
taosGetLinearInterpolationVal(&point, pCtx->outputType, &point1, &point2, TSDB_DATA_TYPE_DOUBLE);
|
||||
assignVal(pCtx->pOutput, pCtx->start.ptr, pCtx->outputBytes, pCtx->inputType);
|
||||
}
|
||||
} else if (type == TSDB_FILL_NEXT) {
|
||||
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->end.val);
|
||||
} else {
|
||||
assignVal(pCtx->pOutput, pCtx->end.ptr, pCtx->outputBytes, pCtx->inputType);
|
||||
}
|
||||
} else if (type == TSDB_FILL_LINEAR) {
|
||||
SPoint point1 = {.key = pCtx->start.key, .val = &pCtx->start.val};
|
||||
SPoint point2 = {.key = pCtx->end.key, .val = &pCtx->end.val};
|
||||
SPoint point = {.key = pCtx->startTs, .val = pCtx->pOutput};
|
||||
|
||||
int32_t srcType = pCtx->inputType;
|
||||
if (IS_NUMERIC_TYPE(srcType)) { // TODO should find the not null data?
|
||||
if (isNull((char *)&pCtx->start.val, srcType) || isNull((char *)&pCtx->end.val, srcType)) {
|
||||
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
|
||||
} else {
|
||||
taosGetLinearInterpolationVal(&point, pCtx->outputType, &point1, &point2, TSDB_DATA_TYPE_DOUBLE);
|
||||
}
|
||||
} else {
|
||||
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// no data generated yet
|
||||
if (pCtx->size == 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
// check the timestamp in input buffer
|
||||
TSKEY skey = GET_TS_DATA(pCtx, 0);
|
||||
TSKEY ekey = GET_TS_DATA(pCtx, 1);
|
||||
|
||||
// no data generated yet
|
||||
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
|
||||
|
||||
if (type == TSDB_FILL_PREV) {
|
||||
assignVal(pCtx->pOutput, pCtx->pInput, pCtx->outputBytes, pCtx->inputType);
|
||||
} else if (type == TSDB_FILL_NEXT) {
|
||||
char* val = ((char*)pCtx->pInput) + pCtx->inputBytes;
|
||||
assignVal(pCtx->pOutput, val, pCtx->outputBytes, pCtx->inputType);
|
||||
} else if (type == TSDB_FILL_LINEAR) {
|
||||
char *start = GET_INPUT_DATA(pCtx, 0);
|
||||
char *end = GET_INPUT_DATA(pCtx, 1);
|
||||
|
||||
SPoint point1 = {.key = skey, .val = start};
|
||||
SPoint point2 = {.key = ekey, .val = end};
|
||||
SPoint point = {.key = pCtx->startTs, .val = pCtx->pOutput};
|
||||
|
||||
int32_t srcType = pCtx->inputType;
|
||||
if (IS_NUMERIC_TYPE(srcType)) { // TODO should find the not null data?
|
||||
if (isNull(start, srcType) || isNull(end, srcType)) {
|
||||
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
|
||||
} else {
|
||||
taosGetLinearInterpolationVal(&point, pCtx->outputType, &point1, &point2, srcType);
|
||||
}
|
||||
} else {
|
||||
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
|
||||
}
|
||||
} else {
|
||||
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
  SET_VAL(pCtx, 1, 1);

}

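The TSDB_FILL_LINEAR branches above delegate to taosGetLinearInterpolationVal over two SPoint values. Below is a minimal standalone sketch of that two-point interpolation (the same math as the DO_INTERPOLATION macro in qFill.c further down); the DemoPoint type and the sample values are illustrative stand-ins, not the engine's definitions.

#include <stdio.h>

/* Simplified stand-in for illustration only; the engine's SPoint stores a void* val. */
typedef struct { long long key; double val; } DemoPoint;

/* v = v1 + (v2 - v1) * (k - k1) / (k2 - k1), i.e. the DO_INTERPOLATION macro */
static double linearInterp(DemoPoint p1, DemoPoint p2, long long k) {
  return p1.val + (p2.val - p1.val) * ((double)(k - p1.key)) / ((double)(p2.key - p1.key));
}

int main(void) {
  DemoPoint start = {.key = 1000, .val = 10.0};   /* row before the target timestamp */
  DemoPoint end   = {.key = 2000, .val = 30.0};   /* row after the target timestamp  */
  long long ts = 1500;                            /* plays the role of pCtx->startTs */
  printf("interp(%lld) = %f\n", ts, linearInterp(start, end, ts));  /* prints 20.0 */
  return 0;
}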
static void interp_function(SQLFunctionCtx *pCtx) {
|
||||
// at this point, the value is existed, return directly
|
||||
if (pCtx->size > 0) {
|
||||
|
@@ -4289,11 +4383,22 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
  STSBuf * pTSbuf = pInfo->pTSBuf;

  tsBufFlush(pTSbuf);

  qDebug("total timestamp :%"PRId64, pTSbuf->numOfTotal);

  // TODO refactor transfer ownership of current file
  *(FILE **)pCtx->pOutput = pTSbuf->f;

  pResInfo->complete = true;

  // get the file size
  struct stat fStat;
  if ((fstat(fileno(pTSbuf->f), &fStat) == 0)) {
    pResInfo->numOfRes = fStat.st_size;
  }

  pTSbuf->remainOpen = true;
  tsBufDestroy(pTSbuf);

  doFinalizer(pCtx);
}

@@ -4637,10 +4742,126 @@ static void sumrate_finalizer(SQLFunctionCtx *pCtx) {
  doFinalizer(pCtx);
}

void blockInfo_func(SQLFunctionCtx* pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo);

  int32_t len = *(int32_t*) pCtx->pInput;
  blockDistInfoFromBinary((char*)pCtx->pInput + sizeof(int32_t), len, pDist);
  pDist->rowSize = (int16_t) pCtx->param[0].i64;

  memcpy(pCtx->pOutput, pCtx->pInput, sizeof(int32_t) + len);

  pResInfo->numOfRes = 1;
  pResInfo->hasResult = DATA_SET_FLAG;
}

static void mergeTableBlockDist(STableBlockDist* pDist, const STableBlockDist* pSrc) {
  assert(pDist != NULL && pSrc != NULL);
  pDist->numOfTables += pSrc->numOfTables;
  pDist->numOfRowsInMemTable += pSrc->numOfRowsInMemTable;
  pDist->numOfFiles += pSrc->numOfFiles;
  pDist->totalSize += pSrc->totalSize;

  if (pDist->dataBlockInfos == NULL) {
    pDist->dataBlockInfos = taosArrayInit(4, sizeof(SFileBlockInfo));
  }

  taosArrayPushBatch(pDist->dataBlockInfos, pSrc->dataBlockInfos->pData, (int32_t) taosArrayGetSize(pSrc->dataBlockInfos));
}

void block_func_merge(SQLFunctionCtx* pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);

  STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo);
  STableBlockDist info = {0};

  int32_t len = *(int32_t*) pCtx->pInput;
  blockDistInfoFromBinary(((char*)pCtx->pInput) + sizeof(int32_t), len, &info);

  mergeTableBlockDist(pDist, &info);
}

static int32_t doGetPercentile(const SArray* pArray, double rate) {
  int32_t len = (int32_t)taosArrayGetSize(pArray);
  if (len <= 0) {
    return 0;
  }

  assert(rate >= 0 && rate <= 1.0);
  int idx = (int32_t)((len - 1) * rate);

  return ((SFileBlockInfo *)(taosArrayGet(pArray, idx)))->numOfRows;
}

static int compareBlockInfo(const void *pLeft, const void *pRight) {
  int32_t left = ((SFileBlockInfo *)pLeft)->numOfRows;
  int32_t right = ((SFileBlockInfo *)pRight)->numOfRows;

  if (left > right) return 1;
  if (left < right) return -1;
  return 0;
}

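doGetPercentile above is a nearest-rank percentile over the block list once it has been sorted with compareBlockInfo (see generateBlockDistResult below). A small self-contained sketch of the same idea over a plain int array; the row counts are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

/* qsort comparator mirroring compareBlockInfo, but over plain ints (illustrative stand-in). */
static int cmpInt(const void *a, const void *b) {
  int l = *(const int *)a, r = *(const int *)b;
  return (l > r) - (l < r);
}

/* Nearest-rank percentile: index = (len - 1) * rate into the sorted array, as in doGetPercentile. */
static int getPercentile(const int *sorted, int len, double rate) {
  if (len <= 0) return 0;
  int idx = (int)((len - 1) * rate);
  return sorted[idx];
}

int main(void) {
  int rowsPerBlock[] = {120, 4000, 900, 4096, 37, 2500};
  int n = (int)(sizeof(rowsPerBlock) / sizeof(rowsPerBlock[0]));

  qsort(rowsPerBlock, n, sizeof(int), cmpInt);
  printf("50th=%d, 95th=%d\n", getPercentile(rowsPerBlock, n, 0.50), getPercentile(rowsPerBlock, n, 0.95));
  return 0;
}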
void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
  if (pTableBlockDist == NULL) {
    return;
  }

  int64_t min = INT64_MAX, max = INT64_MIN, avg = 0;
  SArray* blockInfos = pTableBlockDist->dataBlockInfos;
  int64_t totalRows = 0, totalBlocks = taosArrayGetSize(blockInfos);

  for (size_t i = 0; i < taosArrayGetSize(blockInfos); i++) {
    SFileBlockInfo *blockInfo = taosArrayGet(blockInfos, i);
    int64_t rows = blockInfo->numOfRows;

    min = MIN(min, rows);
    max = MAX(max, rows);
    totalRows += rows;
  }

  avg = totalBlocks > 0 ? (int64_t)(totalRows/totalBlocks) : 0;
  taosArraySort(blockInfos, compareBlockInfo);

  uint64_t totalLen = pTableBlockDist->totalSize;
  int32_t rowSize = pTableBlockDist->rowSize;

  int sz = sprintf(result + VARSTR_HEADER_SIZE,
                   "summary: \n\t "
                   "5th=[%d], 10th=[%d], 20th=[%d], 30th=[%d], 40th=[%d], 50th=[%d]\n\t "
                   "60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
                   "Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
                   "Rows=[%"PRId64"], Blocks=[%"PRId64"], Size=[%.3f(Kb)] Comp=[%.2f%%]\n\t "
                   "RowsInMem=[%d] \n\t SeekHeaderTime=[%d(us)]",
                   doGetPercentile(blockInfos, 0.05), doGetPercentile(blockInfos, 0.10),
                   doGetPercentile(blockInfos, 0.20), doGetPercentile(blockInfos, 0.30),
                   doGetPercentile(blockInfos, 0.40), doGetPercentile(blockInfos, 0.50),
                   doGetPercentile(blockInfos, 0.60), doGetPercentile(blockInfos, 0.70),
                   doGetPercentile(blockInfos, 0.80), doGetPercentile(blockInfos, 0.90),
                   doGetPercentile(blockInfos, 0.95), doGetPercentile(blockInfos, 0.99),
                   min, max, avg, 0.0,
                   totalRows, totalBlocks, totalLen/1024.0, (double)(totalLen*100.0)/(rowSize*totalRows),
                   pTableBlockDist->numOfRowsInMemTable, pTableBlockDist->firstSeekTimeUs);
  varDataSetLen(result, sz);
  UNUSED(sz);
}

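The Comp=[%] value printed by generateBlockDistResult is simply stored size relative to the raw row payload, totalSize * 100 / (rowSize * totalRows). A tiny standalone check of that arithmetic; the figures are hypothetical, not real block statistics.

#include <stdio.h>
#include <stdint.h>

int main(void) {
  /* Hypothetical figures, only to exercise the formula used in generateBlockDistResult. */
  uint64_t totalLen  = 3 * 1024 * 1024;   /* bytes actually stored on disk      */
  int32_t  rowSize   = 64;                /* bytes per row before compression   */
  int64_t  totalRows = 200000;            /* rows covered by the listed blocks  */

  double sizeKb = totalLen / 1024.0;
  double comp   = (double)(totalLen * 100.0) / (rowSize * (double)totalRows);

  printf("Size=[%.3f(Kb)] Comp=[%.2f%%]\n", sizeKb, comp);   /* ~24.58%% here */
  return 0;
}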
void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo);

  pDist->rowSize = (int16_t)pCtx->param[0].i64;
  generateBlockDistResult(pDist, pCtx->pOutput);

  // cannot set the numOfIteratedElems again since it is set during previous iteration
  pResInfo->numOfRes = 1;
  pResInfo->hasResult = DATA_SET_FLAG;

  doFinalizer(pCtx);
}

/////////////////////////////////////////////////////////////////////////////////////////////

/*
 * function compatible list.
 * tag and ts are not involved in the compatibility check

@@ -4659,8 +4880,8 @@ int32_t functionCompatList[] = {
    4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
    // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, interp rate irate
    1, 1, 1, 1, -1, 1, 1, 5, 1, 1,
    // sum_rate, sum_irate, avg_rate, avg_irate
    1, 1, 1, 1,
    // sum_rate, sum_irate, avg_rate, avg_irate, tid_tag, blk_info
    1, 1, 1, 1, 6, 7
};

SAggFunctionInfo aAggs[] = {{
|
||||
|
@ -4675,7 +4896,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
no_next_step,
|
||||
doFinalizer,
|
||||
count_func_merge,
|
||||
count_load_data_info,
|
||||
countRequired,
|
||||
},
|
||||
{
|
||||
// 1
|
||||
|
@ -4860,7 +5081,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
no_next_step,
|
||||
spread_function_finalizer,
|
||||
spread_func_merge,
|
||||
count_load_data_info,
|
||||
countRequired,
|
||||
},
|
||||
{
|
||||
// 14
|
||||
|
@ -4902,7 +5123,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
no_data_info,
|
||||
noDataRequired,
|
||||
},
|
||||
{
|
||||
// 17
|
||||
|
@ -4930,7 +5151,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
no_data_info,
|
||||
noDataRequired,
|
||||
},
|
||||
{
|
||||
// 19
|
||||
|
@ -4958,7 +5179,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
no_data_info,
|
||||
noDataRequired,
|
||||
},
|
||||
{
|
||||
// 21, column project sql function
|
||||
|
@ -4986,7 +5207,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
no_data_info,
|
||||
noDataRequired,
|
||||
},
|
||||
{
|
||||
// 23
|
||||
|
@ -5159,7 +5380,7 @@ SAggFunctionInfo aAggs[] = {{
|
|||
},
|
||||
{
|
||||
// 35
|
||||
"tid_tag", // return table id and the corresponding tags for join match and subscribe
|
||||
"tbid", // return table id and the corresponding tags for join match and subscribe
|
||||
TSDB_FUNC_TID_TAG,
|
||||
TSDB_FUNC_TID_TAG,
|
||||
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE,
|
||||
|
@ -5170,4 +5391,18 @@ SAggFunctionInfo aAggs[] = {{
|
|||
noop1,
|
||||
noop1,
|
||||
dataBlockRequired,
|
||||
} };
|
||||
},
|
||||
{
|
||||
// 35
|
||||
"_block_dist", // return table id and the corresponding tags for join match and subscribe
|
||||
TSDB_FUNC_BLKINFO,
|
||||
TSDB_FUNC_BLKINFO,
|
||||
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
|
||||
function_setup,
|
||||
blockInfo_func,
|
||||
noop2,
|
||||
no_next_step,
|
||||
blockinfo_func_finalizer,
|
||||
block_func_merge,
|
||||
dataBlockRequired,
|
||||
}};
|
||||
|
|
File diff suppressed because it is too large
|
@@ -23,18 +23,19 @@
#include "qFill.h"
#include "qExtbuffer.h"
#include "queryLog.h"
#include "qExecutor.h"

#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC)
#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))

static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows) {
static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) {
  for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
    SFillColInfo* pCol = &pFillInfo->pFillCol[j];
    if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
      continue;
    }

    char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, genRows);
    char* val1 = elePtrAt(data[j], pCol->col.bytes, genRows);

    assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags);
    SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
@ -44,17 +45,17 @@ static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows
|
|||
}
|
||||
}
|
||||
|
||||
static void setNullValueForRow(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfCol, int32_t rowIndex) {
|
||||
static void setNullValueForRow(SFillInfo* pFillInfo, void** data, int32_t numOfCol, int32_t rowIndex) {
|
||||
// the first are always the timestamp column, so start from the second column.
|
||||
for (int32_t i = 1; i < numOfCol; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, rowIndex);
|
||||
char* output = elePtrAt(data[i], pCol->col.bytes, rowIndex);
|
||||
setNull(output, pCol->col.type, pCol->col.bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** srcData, int64_t ts, bool outOfBound) {
|
||||
static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData, int64_t ts, bool outOfBound) {
|
||||
char* prev = pFillInfo->prevValues;
|
||||
char* next = pFillInfo->nextValues;
|
||||
|
||||
|
@ -63,7 +64,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
|
|||
|
||||
// set the primary timestamp column value
|
||||
int32_t index = pFillInfo->numOfCurrent;
|
||||
char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, index);
|
||||
char* val = elePtrAt(data[0], TSDB_KEYSIZE, index);
|
||||
*(TSKEY*) val = pFillInfo->currentKey;
|
||||
|
||||
// set the other values
|
||||
|
@ -77,7 +78,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
|
|||
continue;
|
||||
}
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
char* output = elePtrAt(data[i], pCol->col.bytes, index);
|
||||
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
} else { // no prev value yet, set the value for NULL
|
||||
|
@ -93,7 +94,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
|
|||
continue;
|
||||
}
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
char* output = elePtrAt(data[i], pCol->col.bytes, index);
|
||||
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
} else { // no prev value yet, set the value for NULL
|
||||
|
@ -111,7 +112,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
|
|||
int16_t type = pCol->col.type;
|
||||
int16_t bytes = pCol->col.bytes;
|
||||
|
||||
char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
char *val1 = elePtrAt(data[i], pCol->col.bytes, index);
|
||||
if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) {
|
||||
setNull(val1, pCol->col.type, bytes);
|
||||
continue;
|
||||
|
@ -132,7 +133,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
|
|||
continue;
|
||||
}
|
||||
|
||||
char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
|
||||
char* val1 = elePtrAt(data[i], pCol->col.bytes, index);
|
||||
assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
|
||||
}
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, char* bu
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t outputRows) {
|
||||
static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputRows) {
|
||||
pFillInfo->numOfCurrent = 0;
|
||||
|
||||
char** srcData = pFillInfo->pData;
|
||||
|
@ -213,7 +214,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t ou
|
|||
continue;
|
||||
}
|
||||
|
||||
char* output = elePtrAt(data[i]->data, pCol->col.bytes, pFillInfo->numOfCurrent);
|
||||
char* output = elePtrAt(data[i], pCol->col.bytes, pFillInfo->numOfCurrent);
|
||||
char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->index);
|
||||
|
||||
if (i == 0 || (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) ||
|
||||
|
@ -255,7 +256,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t ou
|
|||
return pFillInfo->numOfCurrent;
|
||||
}
|
||||
|
||||
static int64_t appendFilledResult(SFillInfo* pFillInfo, tFilePage** output, int64_t resultCapacity) {
|
||||
static int64_t appendFilledResult(SFillInfo* pFillInfo, void** output, int64_t resultCapacity) {
|
||||
/*
|
||||
* These data are generated according to fill strategy, since the current timestamp is out of the time window of
|
||||
* real result set. Note that we need to keep the direct previous result rows, to generated the filled data.
|
||||
|
@ -278,7 +279,7 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t
|
|||
int32_t k = 0;
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
|
||||
pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity);
|
||||
pFillInfo->pData[i] = NULL;
|
||||
|
||||
if (TSDB_COL_IS_TAG(pColInfo->flag)) {
|
||||
bool exists = false;
|
||||
|
@ -356,6 +357,10 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3
|
|||
pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc);
|
||||
assert(pFillInfo->rowSize > 0);
|
||||
|
||||
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
pFillInfo->pData[i] = malloc(pFillInfo->pFillCol[i].col.bytes * pFillInfo->alloc);
|
||||
}
|
||||
|
||||
return pFillInfo;
|
||||
}
|
||||
|
||||
|
@ -375,11 +380,16 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) {
|
|||
|
||||
tfree(pFillInfo->prevValues);
|
||||
tfree(pFillInfo->nextValues);
|
||||
tfree(pFillInfo->pTags);
|
||||
|
||||
|
||||
for(int32_t i = 0; i < pFillInfo->numOfTags; ++i) {
|
||||
tfree(pFillInfo->pTags[i].tagVal);
|
||||
}
|
||||
|
||||
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
tfree(pFillInfo->pData[i]);
|
||||
}
|
||||
|
||||
tfree(pFillInfo->pTags);
|
||||
|
||||
tfree(pFillInfo->pData);
|
||||
tfree(pFillInfo->pFillCol);
|
||||
|
@ -413,10 +423,19 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
|
|||
}
|
||||
}
|
||||
|
||||
// copy the data into source data buffer
|
||||
void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) {
|
||||
void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) {
|
||||
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
memcpy(pFillInfo->pData[i], pInput[i]->data, pFillInfo->numOfRows * pFillInfo->pFillCol[i].col.bytes);
|
||||
SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i);
|
||||
// pFillInfo->pData[i] = pColData->pData;
|
||||
if (pInput->info.rows > pFillInfo->alloc) {
|
||||
char* t = realloc(pFillInfo->pData[i], pColData->info.bytes * pInput->info.rows);
|
||||
assert(t != NULL);
|
||||
|
||||
pFillInfo->pData[i] = t;
|
||||
pFillInfo->alloc = pInput->info.rows;
|
||||
}
|
||||
|
||||
memcpy(pFillInfo->pData[i], pColData->pData, pColData->info.bytes * pInput->info.rows);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -427,12 +446,20 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage*
|
|||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
|
||||
const char* data = pInput->data + pCol->col.offset * pInput->num;
|
||||
memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes));
|
||||
if (pInput->num > pFillInfo->alloc) {
|
||||
char* t = realloc(pFillInfo->pData[i], (size_t)(pCol->col.bytes * pInput->num));
|
||||
assert(t != NULL);
|
||||
|
||||
pFillInfo->pData[i] = t;
|
||||
pFillInfo->alloc = (int32_t)pInput->num;
|
||||
}
|
||||
|
||||
memcpy(pFillInfo->pData[i], data, (size_t)(pCol->col.bytes * pInput->num));
|
||||
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
|
||||
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
|
||||
assert (pTag->col.colId == pCol->col.colId);
|
||||
memcpy(pTag->tagVal, data, pCol->col.bytes);
|
||||
memcpy(pTag->tagVal, data, pCol->col.bytes); // TODO not memcpy??
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -490,7 +517,7 @@ int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint*
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) {
|
||||
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, void** output, int32_t capacity) {
|
||||
int32_t remain = taosNumOfRemainRows(pFillInfo);
|
||||
|
||||
int64_t numOfRes = getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, capacity);
|
||||
|
|
|
@ -135,28 +135,33 @@ tSQLExpr *tSqlExprIdValueCreate(SStrToken *pToken, int32_t optrType) {
|
|||
if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) {
|
||||
toTSDBType(pToken->type);
|
||||
|
||||
tVariantCreate(&pSqlExpr->val, pToken);
|
||||
pSqlExpr->nSQLOptr = optrType;
|
||||
tVariantCreate(&pSqlExpr->value, pToken);
|
||||
pSqlExpr->tokenId = optrType;
|
||||
pSqlExpr->type = SQL_NODE_VALUE;
|
||||
} else if (optrType == TK_NOW) {
|
||||
// use microsecond by default
|
||||
pSqlExpr->val.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO);
|
||||
pSqlExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pSqlExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
|
||||
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO);
|
||||
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pSqlExpr->tokenId = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
|
||||
pSqlExpr->type = SQL_NODE_VALUE;
|
||||
} else if (optrType == TK_VARIABLE) {
|
||||
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->val.i64);
|
||||
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
pSqlExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pSqlExpr->nSQLOptr = TK_TIMESTAMP;
|
||||
} else { // it must be the column name (tk_id) if it is not the number
|
||||
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pSqlExpr->tokenId = TK_TIMESTAMP;
|
||||
pSqlExpr->type = SQL_NODE_VALUE;
|
||||
} else {
|
||||
// Here it must be the column name (tk_id) if it is not a number or string.
|
||||
assert(optrType == TK_ID || optrType == TK_ALL);
|
||||
if (pToken != NULL) {
|
||||
pSqlExpr->colInfo = *pToken;
|
||||
}
|
||||
|
||||
pSqlExpr->nSQLOptr = optrType;
|
||||
pSqlExpr->tokenId = optrType;
|
||||
pSqlExpr->type = SQL_NODE_TABLE_COLUMN;
|
||||
}
|
||||
|
||||
return pSqlExpr;
|
||||
|
@ -167,19 +172,22 @@ tSQLExpr *tSqlExprIdValueCreate(SStrToken *pToken, int32_t optrType) {
|
|||
* function name is denoted by pFunctionToken
|
||||
*/
|
||||
tSQLExpr *tSqlExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType) {
|
||||
if (pFuncToken == NULL) return NULL;
|
||||
if (pFuncToken == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr));
|
||||
pExpr->nSQLOptr = optType;
|
||||
pExpr->pParam = pList;
|
||||
pExpr->tokenId = optType;
|
||||
pExpr->type = SQL_NODE_SQLFUNCTION;
|
||||
pExpr->pParam = pList;
|
||||
|
||||
int32_t len = (int32_t)((endToken->z + endToken->n) - pFuncToken->z);
|
||||
pExpr->operand.z = pFuncToken->z;
|
||||
pExpr->operand = (*pFuncToken);
|
||||
|
||||
pExpr->operand.n = len; // raw field name
|
||||
pExpr->operand.type = pFuncToken->type;
|
||||
pExpr->token.n = len;
|
||||
pExpr->token.z = pFuncToken->z;
|
||||
pExpr->token.type = pFuncToken->type;
|
||||
|
||||
pExpr->token = pExpr->operand;
|
||||
return pExpr;
|
||||
}
|
||||
|
||||
|
@ -190,6 +198,7 @@ tSQLExpr *tSqlExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SSt
|
|||
tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
|
||||
tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr));
|
||||
|
||||
pExpr->type = SQL_NODE_EXPR;
|
||||
if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) {
|
||||
char* endPos = pRight->token.z + pRight->token.n;
|
||||
pExpr->token.z = pLeft->token.z;
|
||||
|
@ -203,32 +212,33 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
|
|||
* if a token is noted as the TK_TIMESTAMP, the time precision is microsecond
|
||||
* Otherwise, the time precision is adaptive, determined by the time precision from databases.
|
||||
*/
|
||||
if ((pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_INTEGER) ||
|
||||
(pLeft->nSQLOptr == TK_TIMESTAMP && pRight->nSQLOptr == TK_TIMESTAMP)) {
|
||||
pExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pExpr->nSQLOptr = pLeft->nSQLOptr;
|
||||
if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) ||
|
||||
(pLeft->tokenId == TK_TIMESTAMP && pRight->tokenId == TK_TIMESTAMP)) {
|
||||
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pExpr->tokenId = pLeft->tokenId;
|
||||
pExpr->type = SQL_NODE_VALUE;
|
||||
|
||||
switch (optrType) {
|
||||
case TK_PLUS: {
|
||||
pExpr->val.i64 = pLeft->val.i64 + pRight->val.i64;
|
||||
pExpr->value.i64 = pLeft->value.i64 + pRight->value.i64;
|
||||
break;
|
||||
}
|
||||
case TK_MINUS: {
|
||||
pExpr->val.i64 = pLeft->val.i64 - pRight->val.i64;
|
||||
pExpr->value.i64 = pLeft->value.i64 - pRight->value.i64;
|
||||
break;
|
||||
}
|
||||
case TK_STAR: {
|
||||
pExpr->val.i64 = pLeft->val.i64 * pRight->val.i64;
|
||||
pExpr->value.i64 = pLeft->value.i64 * pRight->value.i64;
|
||||
break;
|
||||
}
|
||||
case TK_DIVIDE: {
|
||||
pExpr->nSQLOptr = TK_FLOAT;
|
||||
pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE;
|
||||
pExpr->val.dKey = (double)pLeft->val.i64 / pRight->val.i64;
|
||||
pExpr->tokenId = TK_FLOAT;
|
||||
pExpr->value.nType = TSDB_DATA_TYPE_DOUBLE;
|
||||
pExpr->value.dKey = (double)pLeft->value.i64 / pRight->value.i64;
|
||||
break;
|
||||
}
|
||||
case TK_REM: {
|
||||
pExpr->val.i64 = pLeft->val.i64 % pRight->val.i64;
|
||||
pExpr->value.i64 = pLeft->value.i64 % pRight->value.i64;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -236,33 +246,35 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
|
|||
tSqlExprDestroy(pLeft);
|
||||
tSqlExprDestroy(pRight);
|
||||
|
||||
} else if ((pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_INTEGER) || (pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_FLOAT) ||
|
||||
(pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_FLOAT)) {
|
||||
pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE;
|
||||
pExpr->nSQLOptr = TK_FLOAT;
|
||||
} else if ((pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_INTEGER) ||
|
||||
(pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_FLOAT) ||
|
||||
(pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_FLOAT)) {
|
||||
pExpr->value.nType = TSDB_DATA_TYPE_DOUBLE;
|
||||
pExpr->tokenId = TK_FLOAT;
|
||||
pExpr->type = SQL_NODE_VALUE;
|
||||
|
||||
double left = (pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE) ? pLeft->val.dKey : pLeft->val.i64;
|
||||
double right = (pRight->val.nType == TSDB_DATA_TYPE_DOUBLE) ? pRight->val.dKey : pRight->val.i64;
|
||||
double left = (pLeft->value.nType == TSDB_DATA_TYPE_DOUBLE) ? pLeft->value.dKey : pLeft->value.i64;
|
||||
double right = (pRight->value.nType == TSDB_DATA_TYPE_DOUBLE) ? pRight->value.dKey : pRight->value.i64;
|
||||
|
||||
switch (optrType) {
|
||||
case TK_PLUS: {
|
||||
pExpr->val.dKey = left + right;
|
||||
pExpr->value.dKey = left + right;
|
||||
break;
|
||||
}
|
||||
case TK_MINUS: {
|
||||
pExpr->val.dKey = left - right;
|
||||
pExpr->value.dKey = left - right;
|
||||
break;
|
||||
}
|
||||
case TK_STAR: {
|
||||
pExpr->val.dKey = left * right;
|
||||
pExpr->value.dKey = left * right;
|
||||
break;
|
||||
}
|
||||
case TK_DIVIDE: {
|
||||
pExpr->val.dKey = left / right;
|
||||
pExpr->value.dKey = left / right;
|
||||
break;
|
||||
}
|
||||
case TK_REM: {
|
||||
pExpr->val.dKey = left - ((int64_t)(left / right)) * right;
|
||||
pExpr->value.dKey = left - ((int64_t)(left / right)) * right;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -271,21 +283,21 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
|
|||
tSqlExprDestroy(pRight);
|
||||
|
||||
} else {
|
||||
pExpr->nSQLOptr = optrType;
|
||||
pExpr->tokenId = optrType;
|
||||
pExpr->pLeft = pLeft;
|
||||
pExpr->pRight = pRight;
|
||||
}
|
||||
} else if (optrType == TK_IN) {
|
||||
pExpr->nSQLOptr = optrType;
|
||||
pExpr->tokenId = optrType;
|
||||
pExpr->pLeft = pLeft;
|
||||
|
||||
tSQLExpr *pRSub = calloc(1, sizeof(tSQLExpr));
|
||||
pRSub->nSQLOptr = TK_SET; // TODO refactor .....
|
||||
pRSub->tokenId = TK_SET; // TODO refactor .....
|
||||
pRSub->pParam = (tSQLExprList *)pRight;
|
||||
|
||||
pExpr->pRight = pRSub;
|
||||
} else {
|
||||
pExpr->nSQLOptr = optrType;
|
||||
pExpr->tokenId = optrType;
|
||||
pExpr->pLeft = pLeft;
|
||||
|
||||
if (pLeft != NULL && pRight == NULL) {
|
||||
|
@ -325,8 +337,8 @@ void tSqlExprNodeDestroy(tSQLExpr *pExpr) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (pExpr->nSQLOptr == TK_STRING) {
|
||||
tVariantDestroy(&pExpr->val);
|
||||
if (pExpr->tokenId == TK_STRING) {
|
||||
tVariantDestroy(&pExpr->value);
|
||||
}
|
||||
|
||||
tSqlExprListDestroy(pExpr->pParam);
|
||||
|
@ -538,8 +550,8 @@ void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
|
|||
/*
|
||||
* extract the select info out of sql string
|
||||
*/
|
||||
SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
|
||||
SQuerySQL *tSetQuerySqlNode(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession,
|
||||
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) {
|
||||
assert(pSelection != NULL);
|
||||
|
||||
|
@ -562,14 +574,17 @@ SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection,
|
|||
}
|
||||
|
||||
if (pInterval != NULL) {
|
||||
pQuery->interval = pInterval->interval;
|
||||
pQuery->offset = pInterval->offset;
|
||||
pQuery->interval = *pInterval;
|
||||
}
|
||||
|
||||
if (pSliding != NULL) {
|
||||
pQuery->sliding = *pSliding;
|
||||
}
|
||||
|
||||
if (pSession != NULL) {
|
||||
pQuery->sessionVal = *pSession;
|
||||
}
|
||||
|
||||
pQuery->fillType = pFill;
|
||||
return pQuery;
|
||||
}
|
||||
|
|
|
@ -254,7 +254,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
|
|||
|
||||
resetSlotInfo(pBucket);
|
||||
|
||||
int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bytes, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
|
||||
int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
tMemBucketDestroy(pBucket);
|
||||
return NULL;
|
||||
|
|
|
@ -9,8 +9,7 @@
|
|||
#define GET_DATA_PAYLOAD(_p) ((char *)(_p)->pData + POINTER_BYTES)
|
||||
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
|
||||
|
||||
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize,
|
||||
int32_t inMemBufSize, const void* handle) {
|
||||
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle) {
|
||||
*pResultBuf = calloc(1, sizeof(SDiskbasedResultBuf));
|
||||
|
||||
SDiskbasedResultBuf* pResBuf = *pResultBuf;
|
||||
|
@ -31,7 +30,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro
|
|||
// at least more than 2 pages must be in memory
|
||||
assert(inMemBufSize >= pagesize * 2);
|
||||
|
||||
pResBuf->numOfRowsPerPage = (pagesize - sizeof(tFilePage)) / rowSize;
|
||||
pResBuf->lruList = tdListNew(POINTER_BYTES);
|
||||
|
||||
// init id hash table
|
||||
|
@ -387,8 +385,6 @@ void releaseResBufPageInfo(SDiskbasedResultBuf* pResultBuf, SPageInfo* pi) {
|
|||
pResultBuf->statis.releasePages += 1;
|
||||
}
|
||||
|
||||
size_t getNumOfRowsPerPage(const SDiskbasedResultBuf* pResultBuf) { return pResultBuf->numOfRowsPerPage; }
|
||||
|
||||
size_t getNumOfResultBufGroupId(const SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->groupSet); }
|
||||
|
||||
size_t getResBufSize(const SDiskbasedResultBuf* pResultBuf) { return (size_t)pResultBuf->totalBufSize; }
|
||||
|
|
|
@ -139,6 +139,7 @@ static SKeyword keywordTable[] = {
|
|||
{"FROM", TK_FROM},
|
||||
{"VARIABLE", TK_VARIABLE},
|
||||
{"INTERVAL", TK_INTERVAL},
|
||||
{"SESSION", TK_SESSION},
|
||||
{"FILL", TK_FILL},
|
||||
{"SLIDING", TK_SLIDING},
|
||||
{"ORDER", TK_ORDER},
|
||||
|
@ -200,25 +201,6 @@ static SKeyword keywordTable[] = {
|
|||
{"TRIGGER", TK_TRIGGER},
|
||||
{"VIEW", TK_VIEW},
|
||||
{"ALL", TK_ALL},
|
||||
{"COUNT", TK_COUNT},
|
||||
{"SUM", TK_SUM},
|
||||
{"AVG", TK_AVG},
|
||||
{"MIN", TK_MIN},
|
||||
{"MAX", TK_MAX},
|
||||
{"FIRST", TK_FIRST},
|
||||
{"LAST", TK_LAST},
|
||||
{"TOP", TK_TOP},
|
||||
{"BOTTOM", TK_BOTTOM},
|
||||
{"STDDEV", TK_STDDEV},
|
||||
{"PERCENTILE", TK_PERCENTILE},
|
||||
{"APERCENTILE", TK_APERCENTILE},
|
||||
{"LEASTSQUARES", TK_LEASTSQUARES},
|
||||
{"HISTOGRAM", TK_HISTOGRAM},
|
||||
{"DIFF", TK_DIFF},
|
||||
{"SPREAD", TK_SPREAD},
|
||||
{"TWA", TK_TWA},
|
||||
{"INTERP", TK_INTERP},
|
||||
{"LAST_ROW", TK_LAST_ROW},
|
||||
{"SEMI", TK_SEMI},
|
||||
{"NONE", TK_NONE},
|
||||
{"PREV", TK_PREV},
|
||||
|
@ -228,17 +210,10 @@ static SKeyword keywordTable[] = {
|
|||
{"TBNAME", TK_TBNAME},
|
||||
{"JOIN", TK_JOIN},
|
||||
{"METRICS", TK_METRICS},
|
||||
{"TBID", TK_TBID},
|
||||
{"STABLE", TK_STABLE},
|
||||
{"FILE", TK_FILE},
|
||||
{"VNODES", TK_VNODES},
|
||||
{"UNION", TK_UNION},
|
||||
{"RATE", TK_RATE},
|
||||
{"IRATE", TK_IRATE},
|
||||
{"SUM_RATE", TK_SUM_RATE},
|
||||
{"SUM_IRATE", TK_SUM_IRATE},
|
||||
{"AVG_RATE", TK_AVG_RATE},
|
||||
{"AVG_IRATE", TK_AVG_IRATE},
|
||||
{"CACHELAST", TK_CACHELAST},
|
||||
{"DISTINCT", TK_DISTINCT},
|
||||
{"PARTITIONS", TK_PARTITIONS},
|
||||
|
@ -300,7 +275,7 @@ int tSQLKeywordCode(const char* z, int n) {
|
|||
* Return the length of the token that begins at z[0].
|
||||
* Store the token type in *type before returning.
|
||||
*/
|
||||
uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
||||
uint32_t tSQLGetToken(char* z, uint32_t* tokenId) {
|
||||
uint32_t i;
|
||||
switch (*z) {
|
||||
case ' ':
|
||||
|
@ -310,121 +285,121 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
case '\r': {
|
||||
for (i = 1; isspace(z[i]); i++) {
|
||||
}
|
||||
*tokenType = TK_SPACE;
|
||||
*tokenId = TK_SPACE;
|
||||
return i;
|
||||
}
|
||||
case ':': {
|
||||
*tokenType = TK_COLON;
|
||||
*tokenId = TK_COLON;
|
||||
return 1;
|
||||
}
|
||||
case '-': {
|
||||
if (z[1] == '-') {
|
||||
for (i = 2; z[i] && z[i] != '\n'; i++) {
|
||||
}
|
||||
*tokenType = TK_COMMENT;
|
||||
*tokenId = TK_COMMENT;
|
||||
return i;
|
||||
}
|
||||
*tokenType = TK_MINUS;
|
||||
*tokenId = TK_MINUS;
|
||||
return 1;
|
||||
}
|
||||
case '(': {
|
||||
*tokenType = TK_LP;
|
||||
*tokenId = TK_LP;
|
||||
return 1;
|
||||
}
|
||||
case ')': {
|
||||
*tokenType = TK_RP;
|
||||
*tokenId = TK_RP;
|
||||
return 1;
|
||||
}
|
||||
case ';': {
|
||||
*tokenType = TK_SEMI;
|
||||
*tokenId = TK_SEMI;
|
||||
return 1;
|
||||
}
|
||||
case '+': {
|
||||
*tokenType = TK_PLUS;
|
||||
*tokenId = TK_PLUS;
|
||||
return 1;
|
||||
}
|
||||
case '*': {
|
||||
*tokenType = TK_STAR;
|
||||
*tokenId = TK_STAR;
|
||||
return 1;
|
||||
}
|
||||
case '/': {
|
||||
if (z[1] != '*' || z[2] == 0) {
|
||||
*tokenType = TK_SLASH;
|
||||
*tokenId = TK_SLASH;
|
||||
return 1;
|
||||
}
|
||||
for (i = 3; z[i] && (z[i] != '/' || z[i - 1] != '*'); i++) {
|
||||
}
|
||||
if (z[i]) i++;
|
||||
*tokenType = TK_COMMENT;
|
||||
*tokenId = TK_COMMENT;
|
||||
return i;
|
||||
}
|
||||
case '%': {
|
||||
*tokenType = TK_REM;
|
||||
*tokenId = TK_REM;
|
||||
return 1;
|
||||
}
|
||||
case '=': {
|
||||
*tokenType = TK_EQ;
|
||||
*tokenId = TK_EQ;
|
||||
return 1 + (z[1] == '=');
|
||||
}
|
||||
case '<': {
|
||||
if (z[1] == '=') {
|
||||
*tokenType = TK_LE;
|
||||
*tokenId = TK_LE;
|
||||
return 2;
|
||||
} else if (z[1] == '>') {
|
||||
*tokenType = TK_NE;
|
||||
*tokenId = TK_NE;
|
||||
return 2;
|
||||
} else if (z[1] == '<') {
|
||||
*tokenType = TK_LSHIFT;
|
||||
*tokenId = TK_LSHIFT;
|
||||
return 2;
|
||||
} else {
|
||||
*tokenType = TK_LT;
|
||||
*tokenId = TK_LT;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
case '>': {
|
||||
if (z[1] == '=') {
|
||||
*tokenType = TK_GE;
|
||||
*tokenId = TK_GE;
|
||||
return 2;
|
||||
} else if (z[1] == '>') {
|
||||
*tokenType = TK_RSHIFT;
|
||||
*tokenId = TK_RSHIFT;
|
||||
return 2;
|
||||
} else {
|
||||
*tokenType = TK_GT;
|
||||
*tokenId = TK_GT;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
case '!': {
|
||||
if (z[1] != '=') {
|
||||
*tokenType = TK_ILLEGAL;
|
||||
*tokenId = TK_ILLEGAL;
|
||||
return 2;
|
||||
} else {
|
||||
*tokenType = TK_NE;
|
||||
*tokenId = TK_NE;
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
case '|': {
|
||||
if (z[1] != '|') {
|
||||
*tokenType = TK_BITOR;
|
||||
*tokenId = TK_BITOR;
|
||||
return 1;
|
||||
} else {
|
||||
*tokenType = TK_CONCAT;
|
||||
*tokenId = TK_CONCAT;
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
case ',': {
|
||||
*tokenType = TK_COMMA;
|
||||
*tokenId = TK_COMMA;
|
||||
return 1;
|
||||
}
|
||||
case '&': {
|
||||
*tokenType = TK_BITAND;
|
||||
*tokenId = TK_BITAND;
|
||||
return 1;
|
||||
}
|
||||
case '~': {
|
||||
*tokenType = TK_BITNOT;
|
||||
*tokenId = TK_BITNOT;
|
||||
return 1;
|
||||
}
|
||||
case '?': {
|
||||
*tokenType = TK_QUESTION;
|
||||
*tokenId = TK_QUESTION;
|
||||
return 1;
|
||||
}
|
||||
case '\'':
|
||||
|
@ -450,7 +425,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
if (z[i]) i++;
|
||||
|
||||
if (strEnd) {
|
||||
*tokenType = TK_STRING;
|
||||
*tokenId = TK_STRING;
|
||||
return i;
|
||||
}
|
||||
|
||||
|
@ -474,10 +449,10 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
}
|
||||
}
|
||||
|
||||
*tokenType = TK_FLOAT;
|
||||
*tokenId = TK_FLOAT;
|
||||
return i;
|
||||
} else {
|
||||
*tokenType = TK_DOT;
|
||||
*tokenId = TK_DOT;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -486,7 +461,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
char next = z[1];
|
||||
|
||||
if (next == 'b') { // bin number
|
||||
*tokenType = TK_BIN;
|
||||
*tokenId = TK_BIN;
|
||||
for (i = 2; (z[i] == '0' || z[i] == '1'); ++i) {
|
||||
}
|
||||
|
||||
|
@ -496,7 +471,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
|
||||
return i;
|
||||
} else if (next == 'x') { //hex number
|
||||
*tokenType = TK_HEX;
|
||||
*tokenId = TK_HEX;
|
||||
for (i = 2; isdigit(z[i]) || (z[i] >= 'a' && z[i] <= 'f') || (z[i] >= 'A' && z[i] <= 'F'); ++i) {
|
||||
}
|
||||
|
||||
|
@ -516,7 +491,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
case '7':
|
||||
case '8':
|
||||
case '9': {
|
||||
*tokenType = TK_INTEGER;
|
||||
*tokenId = TK_INTEGER;
|
||||
for (i = 1; isdigit(z[i]); i++) {
|
||||
}
|
||||
|
||||
|
@ -526,7 +501,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' ||
|
||||
z[i] == 'Y' || z[i] == 'W') &&
|
||||
(isIdChar[(uint8_t)z[i + 1]] == 0)) {
|
||||
*tokenType = TK_VARIABLE;
|
||||
*tokenId = TK_VARIABLE;
|
||||
i += 1;
|
||||
return i;
|
||||
}
|
||||
|
@ -537,12 +512,12 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
while (isdigit(z[i])) {
|
||||
i++;
|
||||
}
|
||||
*tokenType = TK_FLOAT;
|
||||
*tokenId = TK_FLOAT;
|
||||
seg++;
|
||||
}
|
||||
|
||||
if (seg == 4) { // ip address
|
||||
*tokenType = TK_IPTOKEN;
|
||||
*tokenId = TK_IPTOKEN;
|
||||
return i;
|
||||
}
|
||||
|
||||
|
@ -552,14 +527,14 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
while (isdigit(z[i])) {
|
||||
i++;
|
||||
}
|
||||
*tokenType = TK_FLOAT;
|
||||
*tokenId = TK_FLOAT;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
case '[': {
|
||||
for (i = 1; z[i] && z[i - 1] != ']'; i++) {
|
||||
}
|
||||
*tokenType = TK_ID;
|
||||
*tokenId = TK_ID;
|
||||
return i;
|
||||
}
|
||||
case 'T':
|
||||
|
@ -570,7 +545,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
}
|
||||
|
||||
if ((i == 4 && strncasecmp(z, "true", 4) == 0) || (i == 5 && strncasecmp(z, "false", 5) == 0)) {
|
||||
*tokenType = TK_BOOL;
|
||||
*tokenId = TK_BOOL;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
@ -580,12 +555,12 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
|
|||
}
|
||||
for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[(uint8_t) z[i]]; i++) {
|
||||
}
|
||||
*tokenType = tSQLKeywordCode(z, i);
|
||||
*tokenId = tSQLKeywordCode(z, i);
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
*tokenType = TK_ILLEGAL;
|
||||
*tokenId = TK_ILLEGAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "tbuffer.h"
|
||||
#include "tlosertree.h"
|
||||
#include "queryLog.h"
|
||||
#include "tscompression.h"
|
||||
|
||||
typedef struct SCompSupporter {
|
||||
STableQueryInfo **pTableQueryInfo;
|
||||
|
@ -41,12 +42,11 @@ int32_t getOutputInterResultBufSize(SQuery* pQuery) {
|
|||
}
|
||||
|
||||
int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t type) {
|
||||
pResultRowInfo->capacity = size;
|
||||
|
||||
pResultRowInfo->type = type;
|
||||
pResultRowInfo->curIndex = -1;
|
||||
pResultRowInfo->type = type;
|
||||
pResultRowInfo->size = 0;
|
||||
pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
|
||||
pResultRowInfo->curIndex = -1;
|
||||
pResultRowInfo->capacity = size;
|
||||
|
||||
pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES);
|
||||
if (pResultRowInfo->pResult == NULL) {
|
||||
|
@ -135,20 +135,22 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
|
|||
if (pResultRow->pageId >= 0) {
|
||||
tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResultRow->pageId);
|
||||
|
||||
int16_t offset = 0;
|
||||
for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) {
|
||||
SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i];
|
||||
|
||||
char * s = getPosInResultPage(pRuntimeEnv, i, pResultRow, page);
|
||||
size_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
|
||||
int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
|
||||
char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset);
|
||||
memset(s, 0, size);
|
||||
|
||||
offset += size;
|
||||
RESET_RESULT_INFO(pResultInfo);
|
||||
}
|
||||
}
|
||||
|
||||
pResultRow->numOfRows = 0;
|
||||
pResultRow->pageId = -1;
|
||||
pResultRow->rowId = -1;
|
||||
pResultRow->offset = -1;
|
||||
pResultRow->closed = false;
|
||||
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
|
@ -158,13 +160,15 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
|
|||
}
|
||||
}
|
||||
|
||||
SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index) {
|
||||
assert(index >= 0 && index < pRuntimeEnv->pQuery->numOfOutput);
|
||||
return (SResultRowCellInfo*)((char*) pRow->pCellInfo + pRuntimeEnv->rowCellInfoOffset[index]);
|
||||
// TODO refactor: use macro
|
||||
SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t* offset) {
|
||||
assert(index >= 0 && offset != NULL);
|
||||
return (SResultRowCellInfo*)((char*) pRow->pCellInfo + offset[index]);
|
||||
}
|
||||
|
||||
size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv) {
|
||||
return (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultRowCellInfo)) + pRuntimeEnv->interBufSize + sizeof(SResultRow);
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
return (pQuery->numOfOutput * sizeof(SResultRowCellInfo)) + pQuery->interBufSize + sizeof(SResultRow);
|
||||
}
|
||||
|
||||
SResultRowPool* initResultRowPool(size_t size) {
|
||||
|
@ -340,18 +344,18 @@ void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
|
|||
pGroupResInfo->index = 0;
|
||||
}
|
||||
|
||||
void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset) {
|
||||
void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo) {
|
||||
if (pGroupResInfo->pRows != NULL) {
|
||||
taosArrayDestroy(pGroupResInfo->pRows);
|
||||
}
|
||||
|
||||
pGroupResInfo->pRows = taosArrayFromList(pResultInfo->pResult, pResultInfo->size, POINTER_BYTES);
|
||||
pGroupResInfo->index = offset;
|
||||
pGroupResInfo->index = 0;
|
||||
|
||||
assert(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo));
|
||||
}
|
||||
|
||||
bool hasRemainData(SGroupResInfo* pGroupResInfo) {
|
||||
bool hasRemainDataInCurrentGroup(SGroupResInfo* pGroupResInfo) {
|
||||
if (pGroupResInfo->pRows == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
@ -359,6 +363,14 @@ bool hasRemainData(SGroupResInfo* pGroupResInfo) {
|
|||
return pGroupResInfo->index < taosArrayGetSize(pGroupResInfo->pRows);
|
||||
}
|
||||
|
||||
bool hasRemainData(SGroupResInfo* pGroupResInfo) {
|
||||
if (hasRemainDataInCurrentGroup(pGroupResInfo)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return pGroupResInfo->currentGroup < pGroupResInfo->totalGroup;
|
||||
}
|
||||
|
||||
bool incNextGroup(SGroupResInfo* pGroupResInfo) {
|
||||
return (++pGroupResInfo->currentGroup) < pGroupResInfo->totalGroup;
|
||||
}
|
||||
|
@ -372,7 +384,7 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
|
|||
return (int32_t) taosArrayGetSize(pGroupResInfo->pRows);
|
||||
}
|
||||
|
||||
static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow) {
|
||||
static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow, int32_t* rowCellInfoOffset) {
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
|
||||
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
|
||||
|
@ -386,7 +398,7 @@ static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow
|
|||
continue;
|
||||
}
|
||||
|
||||
SResultRowCellInfo *pResultInfo = getResultCell(pRuntimeEnv, pResultRow, j);
|
||||
SResultRowCellInfo *pResultInfo = getResultCell(pResultRow, j, rowCellInfoOffset);
|
||||
assert(pResultInfo != NULL);
|
||||
|
||||
if (pResultInfo->numOfRes > 0) {
|
||||
|
@ -437,7 +449,8 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, void* qinfo) {
|
||||
static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList,
|
||||
int32_t* rowCellInfoOffset) {
|
||||
bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
@ -455,7 +468,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
|
|||
pTableQueryInfoList = malloc(POINTER_BYTES * size);
|
||||
|
||||
if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL || pGroupResInfo->pRows == NULL) {
|
||||
qError("QInfo:%p failed alloc memory", qinfo);
|
||||
qError("QInfo:%p failed alloc memory", pRuntimeEnv->qinfo);
|
||||
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
goto _end;
|
||||
}
|
||||
|
@ -491,7 +504,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
|
|||
SResultRowInfo *pWindowResInfo = &pTableQueryInfoList[tableIndex]->resInfo;
|
||||
SResultRow *pWindowRes = getResultRow(pWindowResInfo, cs.rowIndex[tableIndex]);
|
||||
|
||||
int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes);
|
||||
int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes, rowCellInfoOffset);
|
||||
if (num <= 0) {
|
||||
cs.rowIndex[tableIndex] += 1;
|
||||
|
||||
|
@ -527,7 +540,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
|
|||
|
||||
int64_t endt = taosGetTimestampMs();
|
||||
|
||||
qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", qinfo,
|
||||
qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pRuntimeEnv->qinfo,
|
||||
pGroupResInfo->currentGroup, endt - startt);
|
||||
|
||||
_end:
|
||||
|
@ -538,13 +551,13 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo) {
|
||||
int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRuntimeEnv, int32_t* offset) {
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) {
|
||||
SArray *group = GET_TABLEGROUP(pQInfo, pGroupResInfo->currentGroup);
|
||||
SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup);
|
||||
|
||||
int32_t ret = mergeIntoGroupResultImpl(&pQInfo->runtimeEnv, pGroupResInfo, group, pQInfo);
|
||||
int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -554,19 +567,83 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo) {
|
|||
break;
|
||||
}
|
||||
|
||||
qDebug("QInfo:%p no result in group %d, continue", pQInfo, pGroupResInfo->currentGroup);
|
||||
qDebug("QInfo:%p no result in group %d, continue", pRuntimeEnv->qinfo, pGroupResInfo->currentGroup);
|
||||
cleanupGroupResInfo(pGroupResInfo);
|
||||
incNextGroup(pGroupResInfo);
|
||||
}
|
||||
|
||||
if (pGroupResInfo->currentGroup >= pGroupResInfo->totalGroup && !hasRemainData(pGroupResInfo)) {
|
||||
SET_STABLE_QUERY_OVER(pQInfo);
|
||||
}
|
||||
|
||||
int64_t elapsedTime = taosGetTimestampUs() - st;
|
||||
qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pQInfo,
|
||||
qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pRuntimeEnv->qinfo,
|
||||
pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime);
|
||||
|
||||
pQInfo->runtimeEnv.summary.firstStageMergeTime += elapsedTime;
|
||||
// pQInfo->summary.firstStageMergeTime += elapsedTime;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void blockDistInfoToBinary(STableBlockDist* pDist, struct SBufferWriter* bw) {
|
||||
tbufWriteUint32(bw, pDist->numOfTables);
|
||||
tbufWriteUint16(bw, pDist->numOfFiles);
|
||||
tbufWriteUint64(bw, pDist->totalSize);
|
||||
tbufWriteUint32(bw, pDist->numOfRowsInMemTable);
|
||||
tbufWriteUint64(bw, taosArrayGetSize(pDist->dataBlockInfos));
|
||||
|
||||
// compress the binary string
|
||||
char* p = TARRAY_GET_START(pDist->dataBlockInfos);
|
||||
|
||||
// compress extra bytes
|
||||
size_t x = taosArrayGetSize(pDist->dataBlockInfos) * pDist->dataBlockInfos->elemSize;
|
||||
char* tmp = malloc(x + 2);
|
||||
|
||||
bool comp = false;
|
||||
int32_t len = tsCompressString(p, (int32_t)x, 1, tmp, (int32_t)x, ONE_STAGE_COMP, NULL, 0);
|
||||
if (len == -1 || len >= x) { // compress failed, do not compress this binary data
|
||||
comp = false;
|
||||
len = (int32_t)x;
|
||||
} else {
|
||||
comp = true;
|
||||
}
|
||||
|
||||
tbufWriteUint8(bw, comp);
|
||||
tbufWriteUint32(bw, len);
|
||||
if (comp) {
|
||||
tbufWriteBinary(bw, tmp, len);
|
||||
} else {
|
||||
tbufWriteBinary(bw, p, len);
|
||||
}
|
||||
tfree(tmp);
|
||||
}
|
||||
|
||||
void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDist) {
|
||||
SBufferReader br = tbufInitReader(data, len, false);
|
||||
|
||||
pDist->numOfTables = tbufReadUint32(&br);
|
||||
pDist->numOfFiles = tbufReadUint16(&br);
|
||||
pDist->totalSize = tbufReadUint64(&br);
|
||||
pDist->numOfRowsInMemTable = tbufReadUint32(&br);
|
||||
int64_t numOfBlocks = tbufReadUint64(&br);
|
||||
|
||||
bool comp = tbufReadUint8(&br);
|
||||
uint32_t compLen = tbufReadUint32(&br);
|
||||
|
||||
size_t originalLen = (size_t) (numOfBlocks*sizeof(SFileBlockInfo));
|
||||
|
||||
char* outputBuf = NULL;
|
||||
if (comp) {
|
||||
outputBuf = malloc(originalLen);
|
||||
|
||||
size_t actualLen = compLen;
|
||||
const char* compStr = tbufReadBinary(&br, &actualLen);
|
||||
|
||||
int32_t orignalLen = tsDecompressString(compStr, compLen, 1, outputBuf,
|
||||
(int32_t)originalLen , ONE_STAGE_COMP, NULL, 0);
|
||||
assert(orignalLen == numOfBlocks*sizeof(SFileBlockInfo));
|
||||
} else {
|
||||
outputBuf = (char*) tbufReadBinary(&br, &originalLen);
|
||||
}
|
||||
|
||||
pDist->dataBlockInfos = taosArrayFromList(outputBuf, (uint32_t) numOfBlocks, sizeof(SFileBlockInfo));
|
||||
if (comp) {
|
||||
tfree(outputBuf);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -96,7 +96,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
|||
}
|
||||
|
||||
if (param.pSecExprMsg != NULL) {
|
||||
if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) {
|
||||
if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pExprs)) != TSDB_CODE_SUCCESS) {
|
||||
goto _over;
|
||||
}
|
||||
}
|
||||
|
@ -144,11 +144,11 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
|||
goto _over;
|
||||
}
|
||||
|
||||
qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables);
|
||||
qDebug("qmsg:%p query on %u tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables);
|
||||
}
|
||||
|
||||
int64_t el = taosGetTimestampUs() - st;
|
||||
qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el);
|
||||
qDebug("qmsg:%p tag filter completed, numOfTables:%u, elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el);
|
||||
} else {
|
||||
assert(0);
|
||||
}
|
||||
|
@ -209,6 +209,7 @@ bool qTableQuery(qinfo_t qinfo) {
|
|||
return false;
|
||||
}
|
||||
|
||||
|
||||
pQInfo->startExecTs = taosGetTimestampSec();
|
||||
|
||||
if (isQueryKilled(pQInfo)) {
|
||||
|
@ -216,9 +217,10 @@ bool qTableQuery(qinfo_t qinfo) {
|
|||
return doBuildResCheck(pQInfo);
|
||||
}
|
||||
|
||||
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) {
|
||||
qDebug("QInfo:%p no table exists for query, abort", pQInfo);
|
||||
setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED);
|
||||
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
|
||||
return doBuildResCheck(pQInfo);
|
||||
}
|
||||
|
||||
|
@ -232,26 +234,16 @@ bool qTableQuery(qinfo_t qinfo) {
|
|||
|
||||
qDebug("QInfo:%p query task is launched", pQInfo);
|
||||
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) {
|
||||
assert(pQInfo->runtimeEnv.pQueryHandle == NULL);
|
||||
buildTagQueryResult(pQInfo);
|
||||
} else if (pQInfo->runtimeEnv.stableQuery) {
|
||||
stableQueryImpl(pQInfo);
|
||||
} else if (pQInfo->runtimeEnv.queryBlockDist){
|
||||
buildTableBlockDistResult(pQInfo);
|
||||
} else {
|
||||
tableQueryImpl(pQInfo);
|
||||
}
|
||||
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot);
|
||||
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
if (isQueryKilled(pQInfo)) {
|
||||
qDebug("QInfo:%p query is killed", pQInfo);
|
||||
} else if (pQuery->rec.rows == 0) {
|
||||
qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total);
|
||||
} else if (GET_NUM_OF_RESULTS(pRuntimeEnv) == 0) {
|
||||
qDebug("QInfo:%p over, %u tables queried, %"PRId64" rows are returned", pQInfo, pRuntimeEnv->tableqinfoGroupInfo.numOfTables,
|
||||
pRuntimeEnv->resultInfo.total);
|
||||
} else {
|
||||
qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
|
||||
pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
|
||||
qDebug("QInfo:%p query paused, %d rows returned, numOfTotal:%" PRId64 " rows",
|
||||
pQInfo, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv));
|
||||
}
|
||||
|
||||
return doBuildResCheck(pQInfo);
|
||||
|
@ -279,6 +271,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
|
|||
*buildRes = true;
|
||||
code = pQInfo->code;
|
||||
} else {
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
|
||||
pthread_mutex_lock(&pQInfo->lock);
|
||||
|
@ -286,8 +279,8 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
|
|||
assert(pQInfo->rspContext == NULL);
|
||||
if (pQInfo->dataReady == QUERY_RESULT_READY) {
|
||||
*buildRes = true;
|
||||
qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->resultRowSize,
|
||||
pQuery->rec.rows, tstrerror(pQInfo->code));
|
||||
qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo, pQuery->resultRowSize,
|
||||
GET_NUM_OF_RESULTS(pRuntimeEnv), tstrerror(pQInfo->code));
|
||||
} else {
|
||||
*buildRes = false;
|
||||
qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo);
|
||||
|
@ -309,12 +302,13 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
return TSDB_CODE_QRY_INVALID_QHANDLE;
|
||||
}
|
||||
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
size_t size = getResultSize(pQInfo, &pQuery->rec.rows);
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
|
||||
int32_t s = GET_NUM_OF_RESULTS(pRuntimeEnv);
|
||||
size_t size = pQuery->resultRowSize * s;
|
||||
size += sizeof(int32_t);
|
||||
size += sizeof(STableIdInfo) * taosHashGetSize(pQInfo->arrTableIdInfo);
|
||||
size += sizeof(STableIdInfo) * taosHashGetSize(pRuntimeEnv->pTableRetrieveTsMap);
|
||||
|
||||
*contLen = (int32_t)(size + sizeof(SRetrieveTableRsp));
|
||||
|
||||
|
@ -324,27 +318,27 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
return TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
(*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows);
|
||||
(*pRsp)->numOfRows = htonl((int32_t)s);
|
||||
|
||||
if (pQInfo->code == TSDB_CODE_SUCCESS) {
|
||||
(*pRsp)->offset = htobe64(pQuery->limit.offset);
|
||||
(*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime);
|
||||
(*pRsp)->offset = htobe64(pQInfo->runtimeEnv.currentOffset);
|
||||
(*pRsp)->useconds = htobe64(pQInfo->summary.elapsedTime);
|
||||
} else {
|
||||
(*pRsp)->offset = 0;
|
||||
(*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime);
|
||||
(*pRsp)->useconds = htobe64(pQInfo->summary.elapsedTime);
|
||||
}
|
||||
|
||||
(*pRsp)->precision = htons(pQuery->precision);
|
||||
if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) {
|
||||
if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) {
|
||||
doDumpQueryResult(pQInfo, (*pRsp)->data);
|
||||
} else {
|
||||
setQueryStatus(pQuery, QUERY_OVER);
|
||||
setQueryStatus(pRuntimeEnv, QUERY_OVER);
|
||||
}
|
||||
|
||||
pQInfo->rspContext = NULL;
|
||||
pQInfo->dataReady = QUERY_RESULT_NOT_READY;
|
||||
|
||||
if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) {
|
||||
if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_OVER)) {
|
||||
// the current thread holds the refcount here, so it is safe to free tsdbQueryHandle.
|
||||
*continueExec = false;
|
||||
(*pRsp)->completed = 1; // notify no more result to client
|
||||
|
@ -394,8 +388,7 @@ int32_t qQueryCompleted(qinfo_t qinfo) {
|
|||
return TSDB_CODE_QRY_INVALID_QHANDLE;
|
||||
}
|
||||
|
||||
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER);
|
||||
return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQInfo->runtimeEnv.status, QUERY_OVER);
|
||||
}
|
||||
|
||||
void qDestroyQueryInfo(qinfo_t qHandle) {
|
||||
|
|
2466 src/query/src/sql.c (file diff suppressed because it is too large)
|
@ -10,7 +10,7 @@ namespace {
|
|||
// simple test
|
||||
void simpleTest() {
|
||||
SDiskbasedResultBuf* pResultBuf = NULL;
|
||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 64, 1024, 4096, NULL);
|
||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, NULL);
|
||||
|
||||
int32_t pageId = 0;
|
||||
int32_t groupId = 0;
|
||||
|
@ -52,7 +52,7 @@ void simpleTest() {
|
|||
|
||||
void writeDownTest() {
|
||||
SDiskbasedResultBuf* pResultBuf = NULL;
|
||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 64, 1024, 4*1024, NULL);
|
||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
|
||||
|
||||
int32_t pageId = 0;
|
||||
int32_t writePageId = 0;
|
||||
|
@ -99,7 +99,7 @@ void writeDownTest() {
|
|||
|
||||
void recyclePageTest() {
|
||||
SDiskbasedResultBuf* pResultBuf = NULL;
|
||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 64, 1024, 4*1024, NULL);
|
||||
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
|
||||
|
||||
int32_t pageId = 0;
|
||||
int32_t writePageId = 0;
|
||||
|
|
|
@ -15,7 +15,13 @@
|
|||
#include "tsdbint.h"
|
||||
|
||||
#define TSDB_MAX_SUBBLOCKS 8
|
||||
#define TSDB_KEY_FID(key, days, precision) ((key) / tsMsPerDay[(precision)] / (days))
|
||||
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
|
||||
if (key < 0) {
|
||||
return (int)(-((-key) / tsMsPerDay[precision] / days + 1));
|
||||
} else {
|
||||
return (int)((key / tsMsPerDay[precision] / days));
|
||||
}
|
||||
}
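The macro-to-function change above matters because C integer division truncates toward zero: with the old macro a pre-epoch key such as -1 would fall into file id 0 together with key 0. A small standalone check (hypothetical daysPerFile of 10 and millisecond precision assumed):

#include <stdio.h>
#include <stdint.h>

static const int64_t MS_PER_DAY = 86400000LL;   /* assumption: millisecond precision */

static int fidOf(int64_t key, int32_t days) {
  if (key < 0) {
    /* shift negative keys into their own, negative file ids */
    return (int)(-((-key) / MS_PER_DAY / days + 1));
  }
  return (int)(key / MS_PER_DAY / days);
}

int main(void) {
  /* -1 ms lands in file -1 instead of colliding with key 0 in file 0 */
  printf("%d %d %d\n", fidOf(-1, 10), fidOf(0, 10), fidOf(10 * MS_PER_DAY, 10));
  return 0;   /* prints: -1 0 1 */
}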
|
||||
|
||||
typedef struct {
|
||||
SRtn rtn; // retention snapshot
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
static int tsdbCompareSchemaVersion(const void *key1, const void *key2);
|
||||
static char * getTagIndexKey(const void *pData);
|
||||
static STable *tsdbNewTable();
|
||||
static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper);
|
||||
static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pSTable);
|
||||
static void tsdbFreeTable(STable *pTable);
|
||||
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock);
|
||||
static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFromIdx, bool lock);
|
||||
|
@ -43,6 +43,7 @@ static void * tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STabl
|
|||
static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
|
||||
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
|
||||
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
|
||||
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
|
||||
|
||||
// ------------------ OUTER FUNCTIONS ------------------
|
||||
int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
|
||||
|
@ -87,7 +88,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
|
|||
super = tsdbGetTableByUid(pMeta, pCfg->superUid);
|
||||
if (super == NULL) { // super table does not exist, try to create it
|
||||
newSuper = true;
|
||||
super = tsdbCreateTableFromCfg(pCfg, true);
|
||||
super = tsdbCreateTableFromCfg(pCfg, true, NULL);
|
||||
if (super == NULL) goto _err;
|
||||
} else {
|
||||
if (TABLE_TYPE(super) != TSDB_SUPER_TABLE || TABLE_UID(super) != pCfg->superUid) {
|
||||
|
@ -108,7 +109,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
|
|||
}
|
||||
}
|
||||
|
||||
table = tsdbCreateTableFromCfg(pCfg, false);
|
||||
table = tsdbCreateTableFromCfg(pCfg, false, super);
|
||||
if (table == NULL) goto _err;
|
||||
|
||||
// Register to meta
|
||||
|
@ -674,7 +675,7 @@ static STable *tsdbNewTable() {
|
|||
return pTable;
|
||||
}
|
||||
|
||||
static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper) {
|
||||
static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pSTable) {
|
||||
STable *pTable = NULL;
|
||||
size_t tsize = 0;
|
||||
|
||||
|
@ -726,6 +727,9 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper) {
|
|||
|
||||
if (pCfg->type == TSDB_CHILD_TABLE) {
|
||||
TABLE_SUID(pTable) = pCfg->superUid;
|
||||
if (tsdbCheckTableTagVal(pCfg->tagValues, pSTable->tagSchema) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
pTable->tagVal = tdKVRowDup(pCfg->tagValues);
|
||||
if (pTable->tagVal == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
|
@ -1302,3 +1306,20 @@ static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid) {
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
|
||||
for (size_t i = 0; i < kvRowNCols(pKVRow); i++) {
|
||||
SColIdx * pColIdx = kvRowColIdxAt(pKVRow, i);
|
||||
STColumn *pCol = tdGetColOfID(pSchema, pColIdx->colId);
|
||||
|
||||
if ((pCol == NULL) || (!IS_VAR_DATA_TYPE(pCol->type))) continue;
|
||||
|
||||
void *pValue = tdGetKVRowValOfCol(pKVRow, pCol->colId);
|
||||
if (varDataTLen(pValue) > pCol->bytes) {
|
||||
terrno = TSDB_CODE_TDB_IVLD_TAG_VAL;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
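tsdbCheckTableTagVal() above rejects a child table whose variable-length tag value, including its length header, exceeds the bytes declared for that tag column in the super table's schema. The predicate, isolated as a tiny sketch (names are hypothetical, not the project's API):

#include <stdbool.h>
#include <stdint.h>

/* totalLen: the value's total length including its length header (what
 * varDataTLen() returns above); schemaBytes: the column's declared size. */
static bool tagValueFits(int32_t totalLen, int32_t schemaBytes) {
  return totalLen <= schemaBytes;   /* mirrors the varDataTLen(pValue) > pCol->bytes rejection */
}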
|
|
@ -34,11 +34,9 @@
|
|||
.tid = (_checkInfo)->tableId.tid, \
|
||||
.uid = (_checkInfo)->tableId.uid})
|
||||
|
||||
|
||||
enum {
|
||||
TSDB_QUERY_TYPE_ALL = 1,
|
||||
TSDB_QUERY_TYPE_LAST = 2,
|
||||
TSDB_QUERY_TYPE_EXTERNAL = 3,
|
||||
};
|
||||
|
||||
typedef struct SQueryFilePos {
|
||||
|
@ -68,7 +66,7 @@ typedef struct STableCheckInfo {
|
|||
STableId tableId;
|
||||
TSKEY lastKey;
|
||||
STable* pTableObj;
|
||||
SBlockInfo* pCompInfo;
|
||||
SBlockInfo* pCompInfo;
|
||||
int32_t compSize;
|
||||
int32_t numOfBlocks:29; // number of qualified data blocks not the original blocks
|
||||
int8_t chosen:2; // indicate which iterator should move forward
|
||||
|
@ -78,8 +76,8 @@ typedef struct STableCheckInfo {
|
|||
} STableCheckInfo;
|
||||
|
||||
typedef struct STableBlockInfo {
|
||||
SBlock* compBlock;
|
||||
STableCheckInfo* pTableCheckInfo;
|
||||
SBlock *compBlock;
|
||||
STableCheckInfo *pTableCheckInfo;
|
||||
} STableBlockInfo;
|
||||
|
||||
typedef struct SBlockOrderSupporter {
|
||||
|
@ -111,6 +109,8 @@ typedef struct STsdbQueryHandle {
|
|||
bool checkFiles; // check file stage
|
||||
bool cachelastrow; // check if last row cached
|
||||
bool loadExternalRow; // load time window external data rows
|
||||
bool currentLoadExternalRows; // current load external rows
|
||||
int32_t loadType; // block load type
|
||||
void* qinfo; // query info handle, for debug purpose
|
||||
int32_t type; // query type: retrieve all data blocks, 2. retrieve only last row, 3. retrieve direct prev|next rows
|
||||
SDFileSet* pFileGroup;
|
||||
|
@ -147,6 +147,8 @@ static int32_t tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey,
|
|||
static int32_t tsdbCheckInfoCompar(const void* key1, const void* key2);
|
||||
static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SMemRef* pMemRef);
|
||||
static void* doFreeColumnInfoData(SArray* pColumnInfoData);
|
||||
static void* destroyTableCheckInfo(SArray* pTableCheckInfo);
|
||||
static bool tsdbGetExternalRow(TsdbQueryHandleT pHandle);
|
||||
|
||||
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
|
||||
pBlockLoadInfo->slot = -1;
|
||||
|
@ -294,40 +296,48 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
|
|||
taosArraySort(pTableCheckInfo, tsdbCheckInfoCompar);
|
||||
|
||||
size_t gsize = taosArrayGetSize(pTableCheckInfo);
|
||||
|
||||
|
||||
for (int32_t i = 0; i < gsize; ++i) {
|
||||
STableCheckInfo* pInfo = (STableCheckInfo*) taosArrayGet(pTableCheckInfo, i);
|
||||
|
||||
taosArrayPush(pTable, &pInfo->pTableObj);
|
||||
}
|
||||
|
||||
*psTable = pTable;
|
||||
|
||||
return pTableCheckInfo;
|
||||
}
|
||||
|
||||
static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey, SArray** psTable) {
|
||||
size_t si = taosArrayGetSize(pTableCheckInfo);
|
||||
SArray* pNew = taosArrayInit(si, sizeof(STableCheckInfo));
|
||||
if (pNew == NULL) {
|
||||
return NULL;
|
||||
static void resetCheckInfo(STsdbQueryHandle* pQueryHandle) {
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables >= 1);
|
||||
|
||||
// todo: apply the lastKey of each table check info to avoid loading the header file
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
pCheckInfo->lastKey = pQueryHandle->window.skey;
|
||||
pCheckInfo->iter = tSkipListDestroyIter(pCheckInfo->iter);
|
||||
pCheckInfo->iiter = tSkipListDestroyIter(pCheckInfo->iiter);
|
||||
pCheckInfo->initBuf = false;
|
||||
|
||||
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
|
||||
assert(pCheckInfo->lastKey >= pQueryHandle->window.skey);
|
||||
} else {
|
||||
assert(pCheckInfo->lastKey <= pQueryHandle->window.skey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SArray* pTable = taosArrayInit(si, sizeof(STable*));
|
||||
// only one table, no need to sort again
|
||||
static SArray* createCheckInfoFromCheckInfo(STableCheckInfo* pCheckInfo, TSKEY skey, SArray** psTable) {
|
||||
SArray* pNew = taosArrayInit(1, sizeof(STableCheckInfo));
|
||||
SArray* pTable = taosArrayInit(1, sizeof(STable*));
|
||||
|
||||
for (int32_t j = 0; j < si; ++j) {
|
||||
STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pTableCheckInfo, j);
|
||||
STableCheckInfo info = { .lastKey = skey, .pTableObj = pCheckInfo->pTableObj};
|
||||
STableCheckInfo info = { .lastKey = skey, .pTableObj = pCheckInfo->pTableObj};
|
||||
|
||||
info.tableId = pCheckInfo->tableId;
|
||||
taosArrayPush(pNew, &info);
|
||||
taosArrayPush(pTable, &pCheckInfo->pTableObj);
|
||||
}
|
||||
info.tableId = pCheckInfo->tableId;
|
||||
taosArrayPush(pNew, &info);
|
||||
taosArrayPush(pTable, &pCheckInfo->pTableObj);
|
||||
|
||||
*psTable = pTable;
|
||||
|
||||
// it is ordered already, no need to sort again.
|
||||
taosArraySort(pNew, tsdbCheckInfoCompar);
|
||||
return pNew;
|
||||
}
|
||||
|
||||
|
@ -351,14 +361,15 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
|
|||
pQueryHandle->locateStart = false;
|
||||
pQueryHandle->pMemRef = pMemRef;
|
||||
pQueryHandle->loadExternalRow = pCond->loadExternalRows;
|
||||
pQueryHandle->currentLoadExternalRows = pCond->loadExternalRows;
|
||||
|
||||
pQueryHandle->loadType = pCond->type;
|
||||
|
||||
if (tsdbInitReadH(&pQueryHandle->rhelper, (STsdbRepo*)tsdb) != 0) {
|
||||
goto out_of_memory;
|
||||
}
|
||||
|
||||
//tsdbMayTakeMemSnapshot(pQueryHandle);
|
||||
assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL);
|
||||
|
||||
if (ASCENDING_TRAVERSE(pCond->order)) {
|
||||
assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
|
||||
} else {
|
||||
|
@ -388,7 +399,9 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
|
|||
pQueryHandle->statis[i].colId = colInfo.info.colId;
|
||||
}
|
||||
|
||||
pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
|
||||
if (pCond->numOfCols > 0) {
|
||||
pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
|
||||
}
|
||||
|
||||
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
|
||||
assert(pMeta != NULL);
|
||||
|
@ -433,6 +446,74 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
|
|||
return (TsdbQueryHandleT) pQueryHandle;
|
||||
}
|
||||
|
||||
void tsdbResetQueryHandle(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond) {
|
||||
STsdbQueryHandle* pQueryHandle = queryHandle;
|
||||
|
||||
pQueryHandle->order = pCond->order;
|
||||
pQueryHandle->window = pCond->twindow;
|
||||
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
|
||||
pQueryHandle->cur.fid = -1;
|
||||
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
|
||||
pQueryHandle->checkFiles = true;
|
||||
pQueryHandle->activeIndex = 0; // current active table index
|
||||
pQueryHandle->locateStart = false;
|
||||
pQueryHandle->loadExternalRow = pCond->loadExternalRows;
|
||||
|
||||
if (ASCENDING_TRAVERSE(pCond->order)) {
|
||||
assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
|
||||
} else {
|
||||
assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
|
||||
}
|
||||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
memset(pQueryHandle->statis, 0, sizeof(SDataStatis));
|
||||
|
||||
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
|
||||
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
|
||||
|
||||
resetCheckInfo(pQueryHandle);
|
||||
}
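tsdbResetQueryHandle() lets a caller reuse an existing handle for a new time window instead of tearing it down and rebuilding it: the cursor is rewound, the per-table iterators are cleared through resetCheckInfo(), and the block-load bookkeeping is re-initialized. A hedged usage sketch (only the STsdbQueryCond fields read by the function above are set; `handle`, `newSkey` and `newEkey` are assumed to exist, error handling omitted):

STsdbQueryCond cond = {0};
cond.order            = TSDB_ORDER_ASC;
cond.twindow          = (STimeWindow){ .skey = newSkey, .ekey = newEkey };
cond.loadExternalRows = false;

tsdbResetQueryHandle(handle, &cond);       /* rewind the same handle onto the new window */
while (tsdbNextDataBlock(handle)) {
  /* consume the data blocks of the new window */
}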
|
||||
|
||||
void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond, STableGroupInfo* groupList) {
|
||||
STsdbQueryHandle* pQueryHandle = queryHandle;
|
||||
|
||||
pQueryHandle->order = pCond->order;
|
||||
pQueryHandle->window = pCond->twindow;
|
||||
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
|
||||
pQueryHandle->cur.fid = -1;
|
||||
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
|
||||
pQueryHandle->checkFiles = true;
|
||||
pQueryHandle->activeIndex = 0; // current active table index
|
||||
pQueryHandle->locateStart = false;
|
||||
pQueryHandle->loadExternalRow = pCond->loadExternalRows;
|
||||
|
||||
if (ASCENDING_TRAVERSE(pCond->order)) {
|
||||
assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
|
||||
} else {
|
||||
assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
|
||||
}
|
||||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
memset(pQueryHandle->statis, 0, sizeof(SDataStatis));
|
||||
|
||||
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
|
||||
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
|
||||
|
||||
SArray* pTable = NULL;
|
||||
STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
|
||||
|
||||
pQueryHandle->pTableCheckInfo = destroyTableCheckInfo(pQueryHandle->pTableCheckInfo);
|
||||
|
||||
pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &pTable);
|
||||
if (pQueryHandle->pTableCheckInfo == NULL) {
|
||||
tsdbCleanupQueryHandle(pQueryHandle);
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pQueryHandle->prev = doFreeColumnInfoData(pQueryHandle->prev);
|
||||
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
|
||||
}
|
||||
|
||||
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pMemRef) {
|
||||
pCond->twindow = updateLastrowForEachGroup(groupList);
|
||||
|
||||
|
@ -469,13 +550,46 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle) {
|
|||
return res;
|
||||
}
|
||||
|
||||
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) {
|
||||
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo, pRef);
|
||||
pQueryHandle->loadExternalRow = true;
|
||||
if (pQueryHandle != NULL) {
|
||||
changeQueryHandleForInterpQuery(pQueryHandle);
|
||||
// leave only one table for each group
|
||||
static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
|
||||
assert(pGroupList);
|
||||
size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
|
||||
|
||||
STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo));
|
||||
pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray));
|
||||
|
||||
for(int32_t i = 0; i < numOfGroup; ++i) {
|
||||
SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
|
||||
size_t numOfTables = taosArrayGetSize(oneGroup);
|
||||
|
||||
SArray* px = taosArrayInit(4, sizeof(STableKeyInfo));
|
||||
for (int32_t j = 0; j < numOfTables; ++j) {
|
||||
STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(oneGroup, j);
|
||||
if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) {
|
||||
taosArrayPush(px, pInfo);
|
||||
pNew->numOfTables += 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// there is no data in this group
|
||||
if (taosArrayGetSize(px) == 0) {
|
||||
taosArrayDestroy(px);
|
||||
} else {
|
||||
taosArrayPush(pNew->pGroupList, &px);
|
||||
}
|
||||
}
|
||||
|
||||
return pNew;
|
||||
}
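trimTableGroup() above keeps at most one table per group for an external-window (interpolation) query: the first table in the group that has written data and whose recorded last key is not before the window start. The selection predicate, isolated (TSKEY_INITIAL_VAL is assumed to be the "no rows yet" sentinel):

#include <stdbool.h>
#include <stdint.h>

#define TSKEY_INITIAL_VAL INT64_MIN   /* assumption: sentinel meaning the table has no rows yet */

/* keyInfoLastKey corresponds to pInfo->lastKey above, tableLastKey to the table's own lastKey */
static bool qualifiesForExternalWindow(int64_t windowSkey, int64_t keyInfoLastKey, int64_t tableLastKey) {
  return windowSkey <= keyInfoLastKey && tableLastKey != TSKEY_INITIAL_VAL;
}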
|
||||
|
||||
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) {
|
||||
STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
|
||||
|
||||
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qinfo, pRef);
|
||||
pQueryHandle->loadExternalRow = true;
|
||||
pQueryHandle->currentLoadExternalRows = true;
|
||||
|
||||
return pQueryHandle;
|
||||
}
|
||||
|
||||
|
@ -769,77 +883,94 @@ static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY s
|
|||
return midSlot;
|
||||
}
|
||||
|
||||
static int32_t loadBlockInfo(STsdbQueryHandle * pQueryHandle, int32_t index, int32_t* numOfBlocks) {
|
||||
int32_t code = 0;
|
||||
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, index);
|
||||
pCheckInfo->numOfBlocks = 0;
|
||||
|
||||
if (tsdbSetReadTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj) != TSDB_CODE_SUCCESS) {
|
||||
code = terrno;
|
||||
return code;
|
||||
}
|
||||
|
||||
SBlockIdx* compIndex = pQueryHandle->rhelper.pBlkIdx;
|
||||
|
||||
// no data block in this file, try next file
|
||||
if (compIndex == NULL || compIndex->uid != pCheckInfo->tableId.uid) {
|
||||
return 0; // no data blocks in the file belongs to pCheckInfo->pTable
|
||||
}
|
||||
|
||||
if (pCheckInfo->compSize < (int32_t)compIndex->len) {
|
||||
assert(compIndex->len > 0);
|
||||
|
||||
char* t = realloc(pCheckInfo->pCompInfo, compIndex->len);
|
||||
if (t == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return code;
|
||||
}
|
||||
|
||||
pCheckInfo->pCompInfo = (SBlockInfo*)t;
|
||||
pCheckInfo->compSize = compIndex->len;
|
||||
}
|
||||
|
||||
tsdbLoadBlockInfo(&(pQueryHandle->rhelper), (void*)(pCheckInfo->pCompInfo));
|
||||
SBlockInfo* pCompInfo = pCheckInfo->pCompInfo;
|
||||
|
||||
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
|
||||
|
||||
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
|
||||
assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey);
|
||||
} else {
|
||||
assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey);
|
||||
}
|
||||
|
||||
s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
|
||||
// discard the unqualified data block based on the query time window
|
||||
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
|
||||
int32_t end = start;
|
||||
|
||||
if (s > pCompInfo->blocks[start].keyLast) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// todo: speed up locating the end block
|
||||
while (end < (int32_t)compIndex->numOfBlocks && (pCompInfo->blocks[end].keyFirst <= e)) {
|
||||
end += 1;
|
||||
}
|
||||
|
||||
pCheckInfo->numOfBlocks = (end - start);
|
||||
|
||||
if (start > 0) {
|
||||
memmove(pCompInfo->blocks, &pCompInfo->blocks[start], pCheckInfo->numOfBlocks * sizeof(SBlock));
|
||||
}
|
||||
|
||||
(*numOfBlocks) += pCheckInfo->numOfBlocks;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlocks) {
|
||||
// load all the comp offset value for all tables in this file
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
*numOfBlocks = 0;
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
pCheckInfo->numOfBlocks = 0;
|
||||
size_t numOfTables = 0;
|
||||
if (pQueryHandle->loadType == BLOCK_LOAD_TABLE_SEQ_ORDER) {
|
||||
code = loadBlockInfo(pQueryHandle, pQueryHandle->activeIndex, numOfBlocks);
|
||||
} else if (pQueryHandle->loadType == BLOCK_LOAD_OFFSET_SEQ_ORDER) {
|
||||
numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
|
||||
if (tsdbSetReadTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj) != TSDB_CODE_SUCCESS) {
|
||||
code = terrno;
|
||||
break;
|
||||
}
|
||||
|
||||
SBlockIdx* compIndex = pQueryHandle->rhelper.pBlkIdx;
|
||||
|
||||
// no data block in this file, try next file
|
||||
if (compIndex == NULL || compIndex->uid != pCheckInfo->tableId.uid) {
|
||||
continue; // no data blocks in the file belongs to pCheckInfo->pTable
|
||||
}
|
||||
|
||||
if (pCheckInfo->compSize < (int32_t)compIndex->len) {
|
||||
assert(compIndex->len > 0);
|
||||
|
||||
char* t = realloc(pCheckInfo->pCompInfo, compIndex->len);
|
||||
if (t == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
break;
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
code = loadBlockInfo(pQueryHandle, i, numOfBlocks);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
pCheckInfo->pCompInfo = (SBlockInfo*) t;
|
||||
pCheckInfo->compSize = compIndex->len;
|
||||
}
|
||||
|
||||
tsdbLoadBlockInfo(&(pQueryHandle->rhelper), (void *)(pCheckInfo->pCompInfo));
|
||||
SBlockInfo* pCompInfo = pCheckInfo->pCompInfo;
|
||||
|
||||
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
|
||||
|
||||
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
|
||||
assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey);
|
||||
} else {
|
||||
assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey);
|
||||
}
|
||||
|
||||
s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
|
||||
// discard the unqualified data block based on the query time window
|
||||
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
|
||||
int32_t end = start;
|
||||
|
||||
if (s > pCompInfo->blocks[start].keyLast) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// todo speedup the procedure of located end block
|
||||
while (end < (int32_t)compIndex->numOfBlocks && (pCompInfo->blocks[end].keyFirst <= e)) {
|
||||
end += 1;
|
||||
}
|
||||
|
||||
pCheckInfo->numOfBlocks = (end - start);
|
||||
|
||||
if (start > 0) {
|
||||
memmove(pCompInfo->blocks, &pCompInfo->blocks[start], pCheckInfo->numOfBlocks * sizeof(SBlock));
|
||||
}
|
||||
|
||||
(*numOfBlocks) += pCheckInfo->numOfBlocks;
|
||||
} else {
|
||||
assert(0);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -1268,7 +1399,7 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
|
|||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
SET_DOUBLE_PTR(pData, value);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
*(TSKEY *)pData = tdGetKey(*(TKEY *)value);
|
||||
break;
|
||||
default:
|
||||
|
@ -1730,6 +1861,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
memset(pQueryHandle->pDataBlockInfo, 0, size);
|
||||
*numOfAllocBlocks = numOfBlocks;
|
||||
|
||||
// access data blocks according to the offset of each block in asc/desc order.
|
||||
int32_t numOfTables = (int32_t)taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
|
||||
SBlockOrderSupporter sup = {0};
|
||||
|
@ -1959,6 +2091,91 @@ static void moveToNextDataBlockInCurrentFile(STsdbQueryHandle* pQueryHandle) {
|
|||
cur->blockCompleted = false;
|
||||
}
|
||||
|
||||
int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist* pTableBlockInfo) {
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) queryHandle;
|
||||
|
||||
pTableBlockInfo->totalSize = 0;
|
||||
STsdbFS* pFileHandle = REPO_FS(pQueryHandle->pTsdb);
|
||||
|
||||
// find the start data block in file
|
||||
pQueryHandle->locateStart = true;
|
||||
STsdbCfg* pCfg = &pQueryHandle->pTsdb->config;
|
||||
int32_t fid = getFileIdFromKey(pQueryHandle->window.skey, pCfg->daysPerFile, pCfg->precision);
|
||||
|
||||
tsdbRLockFS(pFileHandle);
|
||||
tsdbFSIterInit(&pQueryHandle->fileIter, pFileHandle, pQueryHandle->order);
|
||||
tsdbFSIterSeek(&pQueryHandle->fileIter, fid);
|
||||
tsdbUnLockFS(pFileHandle);
|
||||
|
||||
pTableBlockInfo->numOfFiles += 1;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t numOfBlocks = 0;
|
||||
int32_t numOfTables = (int32_t)taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
STimeWindow win = TSWINDOW_INITIALIZER;
|
||||
|
||||
while (true) {
|
||||
numOfBlocks = 0;
|
||||
tsdbRLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
|
||||
if ((pQueryHandle->pFileGroup = tsdbFSIterNext(&pQueryHandle->fileIter)) == NULL) {
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
break;
|
||||
}
|
||||
|
||||
tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, pQueryHandle->pFileGroup->fid, &win.skey, &win.ekey);
|
||||
|
||||
// current file does not overlap with the query time window, ignore the remaining files
|
||||
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
|
||||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %p", pQueryHandle,
|
||||
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo);
|
||||
pQueryHandle->pFileGroup = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
pTableBlockInfo->numOfFiles += 1;
|
||||
if (tsdbSetAndOpenReadFSet(&pQueryHandle->rhelper, pQueryHandle->pFileGroup) < 0) {
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
code = terrno;
|
||||
break;
|
||||
}
|
||||
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
|
||||
if (tsdbLoadBlockIdx(&pQueryHandle->rhelper) < 0) {
|
||||
code = terrno;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((code = getFileCompInfo(pQueryHandle, &numOfBlocks)) != TSDB_CODE_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
|
||||
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks, numOfTables,
|
||||
pQueryHandle->pFileGroup->fid, pQueryHandle->qinfo);
|
||||
|
||||
if (numOfBlocks == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
|
||||
SBlock* pBlock = pCheckInfo->pCompInfo->blocks;
|
||||
for (int32_t j = 0; j < pCheckInfo->numOfBlocks; ++j) {
|
||||
pTableBlockInfo->totalSize += pBlock[j].len;
|
||||
|
||||
int32_t numOfRows = pBlock[j].numOfRows;
|
||||
taosArrayPush(pTableBlockInfo->dataBlockInfos, &numOfRows);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists) {
|
||||
STsdbFS* pFileHandle = REPO_FS(pQueryHandle->pTsdb);
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
@ -2017,19 +2234,14 @@ static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
|
|||
pQueryHandle->activeIndex += 1;
|
||||
}
|
||||
|
||||
if (pQueryHandle->loadExternalRow && pQueryHandle->window.skey == pQueryHandle->window.ekey) {
|
||||
SMemRef* pMemRef = pQueryHandle->pMemRef;
|
||||
doGetExternalRow(pQueryHandle, TSDB_PREV_ROW, pMemRef);
|
||||
doGetExternalRow(pQueryHandle, TSDB_NEXT_ROW, pMemRef);
|
||||
}
|
||||
|
||||
// no data in memtable or imemtable, decrease the memory reference.
|
||||
tsdbMayUnTakeMemSnapshot(pQueryHandle);
|
||||
// TODO !!
|
||||
// tsdbMayUnTakeMemSnapshot(pQueryHandle);
|
||||
return false;
|
||||
}
|
||||
|
||||
// todo: not unref'ed yet, since multi-group interpolation queries are not supported
|
||||
static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
|
||||
static UNUSED_FUNC void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
|
||||
// filter the queried time stamp in the first place
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
|
||||
|
||||
|
@ -2152,149 +2364,164 @@ static void destroyHelper(void* param) {
|
|||
free(param);
|
||||
}
|
||||
|
||||
// handle data in cache situation
|
||||
bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
|
||||
|
||||
int64_t stime = taosGetTimestampUs();
|
||||
int64_t elapsedTime = stime;
|
||||
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables > 0);
|
||||
|
||||
if (pQueryHandle->type == TSDB_QUERY_TYPE_LAST && pQueryHandle->cachelastrow) {
|
||||
// the last row is cached in buffer, return it directly.
|
||||
// here note that the pQueryHandle->window must be the TS_INITIALIZER
|
||||
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
||||
SDataRow pRow = NULL;
|
||||
TSKEY key = TSKEY_INITIAL_VAL;
|
||||
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
|
||||
|
||||
if (++pQueryHandle->activeIndex < numOfTables) {
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
|
||||
int32_t ret = tsdbGetCachedLastRow(pCheckInfo->pTableObj, &pRow, &key);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
|
||||
copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL);
|
||||
tfree(pRow);
|
||||
|
||||
// update the last key value
|
||||
pCheckInfo->lastKey = key + step;
|
||||
|
||||
cur->rows = 1; // only one row
|
||||
cur->lastKey = key + step;
|
||||
cur->mixBlock = true;
|
||||
cur->win.skey = key;
|
||||
cur->win.ekey = key;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
|
||||
if (pQueryHandle->checkFiles) {
|
||||
// check if the query range overlaps with the file data block
|
||||
bool exists = true;
|
||||
|
||||
int32_t code = getDataBlocksInFiles(pQueryHandle, &exists);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pQueryHandle->activeIndex = 0;
|
||||
pQueryHandle->checkFiles = false;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if (exists) {
|
||||
pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime);
|
||||
if (pQueryHandle->currentLoadExternalRows && pQueryHandle->window.skey == pQueryHandle->window.ekey) {
|
||||
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, 0);
|
||||
assert(*(int64_t*)pColInfo->pData == pQueryHandle->window.skey);
|
||||
}
|
||||
|
||||
pQueryHandle->currentLoadExternalRows = false; // clear the flag, since the exact matched row is found.
|
||||
return exists;
|
||||
}
|
||||
|
||||
pQueryHandle->activeIndex = 0;
|
||||
pQueryHandle->checkFiles = false;
|
||||
}
|
||||
|
||||
// TODO: opt by consider the scan order
|
||||
bool ret = doHasDataInBuffer(pQueryHandle);
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
if (hasMoreDataInCache(pQueryHandle)) {
|
||||
pQueryHandle->currentLoadExternalRows = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
elapsedTime = taosGetTimestampUs() - stime;
|
||||
pQueryHandle->cost.checkForNextTime += elapsedTime;
|
||||
return ret;
|
||||
// current result is empty
|
||||
if (pQueryHandle->currentLoadExternalRows && pQueryHandle->window.skey == pQueryHandle->window.ekey && pQueryHandle->cur.rows == 0) {
|
||||
SMemRef* pMemRef = pQueryHandle->pMemRef;
|
||||
|
||||
doGetExternalRow(pQueryHandle, TSDB_PREV_ROW, pMemRef);
|
||||
doGetExternalRow(pQueryHandle, TSDB_NEXT_ROW, pMemRef);
|
||||
|
||||
bool result = tsdbGetExternalRow(pQueryHandle);
|
||||
|
||||
pQueryHandle->prev = doFreeColumnInfoData(pQueryHandle->prev);
|
||||
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
|
||||
pQueryHandle->currentLoadExternalRows = false;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool tsdbNextDataBlockWithoutMerge(TsdbQueryHandleT* pHandle) {
|
||||
static bool loadCachedLastRow(STsdbQueryHandle* pQueryHandle) {
|
||||
// the last row is cached in buffer, return it directly.
|
||||
// here note that the pQueryHandle->window must be the TS_INITIALIZER
|
||||
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables > 0 && numOfCols > 0);
|
||||
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
||||
SDataRow pRow = NULL;
|
||||
TSKEY key = TSKEY_INITIAL_VAL;
|
||||
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
|
||||
|
||||
if (++pQueryHandle->activeIndex < numOfTables) {
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
|
||||
int32_t ret = tsdbGetCachedLastRow(pCheckInfo->pTableObj, &pRow, &key);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
|
||||
copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL);
|
||||
tfree(pRow);
|
||||
|
||||
// update the last key value
|
||||
pCheckInfo->lastKey = key + step;
|
||||
|
||||
cur->rows = 1; // only one row
|
||||
cur->lastKey = key + step;
|
||||
cur->mixBlock = true;
|
||||
cur->win.skey = key;
|
||||
cur->win.ekey = key;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables > 0);
|
||||
|
||||
int64_t stime = taosGetTimestampUs();
|
||||
|
||||
while(pQueryHandle->activeIndex < numOfTables) {
|
||||
if (loadBlockOfActiveTable(pQueryHandle)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
|
||||
pCheckInfo->numOfBlocks = 0;
|
||||
|
||||
pQueryHandle->activeIndex += 1;
|
||||
pQueryHandle->locateStart = false;
|
||||
pQueryHandle->checkFiles = true;
|
||||
pQueryHandle->cur.rows = 0;
|
||||
pQueryHandle->currentLoadExternalRows = pQueryHandle->loadExternalRow;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
|
||||
int64_t elapsedTime = taosGetTimestampUs() - stime;
|
||||
pQueryHandle->cost.checkForNextTime += elapsedTime;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// handle data in cache situation
|
||||
bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
|
||||
|
||||
int64_t stime = taosGetTimestampUs();
|
||||
int64_t elapsedTime = stime;
|
||||
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables > 0);
|
||||
|
||||
if (pQueryHandle->type == TSDB_QUERY_TYPE_LAST && pQueryHandle->cachelastrow) {
|
||||
// the last row is cached in buffer, return it directly.
|
||||
// here note that the pQueryHandle->window must be the TS_INITIALIZER
|
||||
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
return loadCachedLastRow(pQueryHandle);
|
||||
}
|
||||
|
||||
SDataRow pRow = NULL;
|
||||
TSKEY key = TSKEY_INITIAL_VAL;
|
||||
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
|
||||
if (pQueryHandle->loadType == BLOCK_LOAD_TABLE_SEQ_ORDER) {
|
||||
return loadDataBlockFromTableSeq(pQueryHandle);
|
||||
} else { // loadType == RR and Offset Order
|
||||
if (pQueryHandle->checkFiles) {
|
||||
// check if the query range overlaps with the file data block
|
||||
bool exists = true;
|
||||
|
||||
int32_t code = getDataBlocksInFiles(pQueryHandle, &exists);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pQueryHandle->activeIndex = 0;
|
||||
pQueryHandle->checkFiles = false;
|
||||
|
||||
if (++pQueryHandle->activeIndex < numOfTables) {
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
|
||||
int32_t ret = tsdbGetCachedLastRow(pCheckInfo->pTableObj, &pRow, &key);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
|
||||
copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL);
|
||||
tfree(pRow);
|
||||
if (exists) {
|
||||
pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime);
|
||||
return exists;
|
||||
}
|
||||
|
||||
// update the last key value
|
||||
pCheckInfo->lastKey = key + step;
|
||||
|
||||
cur->rows = 1; // only one row
|
||||
cur->lastKey = key + step;
|
||||
cur->mixBlock = true;
|
||||
cur->win.skey = key;
|
||||
cur->win.ekey = key;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pQueryHandle->checkFiles) {
|
||||
// check if the query range overlaps with the file data block
|
||||
bool exists = true;
|
||||
|
||||
int32_t code = getDataBlocksInFiles(pQueryHandle, &exists);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pQueryHandle->activeIndex = 0;
|
||||
pQueryHandle->checkFiles = false;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if (exists) {
|
||||
pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime);
|
||||
return exists;
|
||||
}
|
||||
// TODO: optimize by considering the scan order
|
||||
bool ret = doHasDataInBuffer(pQueryHandle);
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
|
||||
pQueryHandle->activeIndex = 0;
|
||||
pQueryHandle->checkFiles = false;
|
||||
elapsedTime = taosGetTimestampUs() - stime;
|
||||
pQueryHandle->cost.checkForNextTime += elapsedTime;
|
||||
return ret;
|
||||
}
|
||||
|
||||
elapsedTime = taosGetTimestampUs() - stime;
|
||||
pQueryHandle->cost.checkForNextTime += elapsedTime;
|
||||
return false;
|
||||
}
|
||||
|
||||
static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SMemRef* pMemRef) {
|
||||
|
@ -2342,7 +2569,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
|
|||
}
|
||||
|
||||
// load the previous row
|
||||
STsdbQueryCond cond = {.numOfCols = numOfCols, .loadExternalRows = false,};
|
||||
STsdbQueryCond cond = {.numOfCols = numOfCols, .loadExternalRows = false, .type = BLOCK_LOAD_OFFSET_SEQ_ORDER};
|
||||
if (type == TSDB_PREV_ROW) {
|
||||
cond.order = TSDB_ORDER_DESC;
|
||||
cond.twindow = (STimeWindow){pQueryHandle->window.skey, INT64_MIN};
|
||||
|
@ -2363,20 +2590,20 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
|
|||
}
|
||||
|
||||
pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pMemRef);
|
||||
|
||||
tfree(cond.colList);
|
||||
|
||||
// current table, only one table
|
||||
STableCheckInfo* pCurrent = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
|
||||
|
||||
SArray* psTable = NULL;
|
||||
|
||||
pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey, &psTable);
|
||||
pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pCurrent, pSecQueryHandle->window.skey, &psTable);
|
||||
if (pSecQueryHandle->pTableCheckInfo == NULL) {
|
||||
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
goto out_of_memory;
|
||||
}
|
||||
|
||||
|
||||
tsdbMayTakeMemSnapshot(pSecQueryHandle, psTable);
|
||||
|
||||
tsdbMayTakeMemSnapshot(pSecQueryHandle, psTable);
|
||||
if (!tsdbNextDataBlock((void*)pSecQueryHandle)) {
|
||||
// no result in current query, free the corresponding result rows structure
|
||||
if (type == TSDB_PREV_ROW) {
|
||||
|
@ -2406,10 +2633,35 @@ out_of_memory:
|
|||
return terrno;
|
||||
}
|
||||
|
||||
SArray* tsdbGetExternalRow(TsdbQueryHandleT *pHandle, SMemRef* pMemRef, int16_t type) {
|
||||
bool tsdbGetExternalRow(TsdbQueryHandleT pHandle) {
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
|
||||
assert(type == TSDB_PREV_ROW || type == TSDB_NEXT_ROW);
|
||||
return (type == TSDB_PREV_ROW)? pQueryHandle->prev:pQueryHandle->next;
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
||||
cur->fid = INT32_MIN;
|
||||
cur->mixBlock = true;
|
||||
if (pQueryHandle->prev == NULL || pQueryHandle->next == NULL) {
|
||||
cur->rows = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t numOfCols = (int32_t) QH_GET_NUM_OF_COLS(pQueryHandle);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pQueryHandle->pColumns, i);
|
||||
SColumnInfoData* first = taosArrayGet(pQueryHandle->prev, i);
|
||||
|
||||
memcpy(pColInfoData->pData, first->pData, pColInfoData->info.bytes);
|
||||
|
||||
SColumnInfoData* sec = taosArrayGet(pQueryHandle->next, i);
|
||||
memcpy(((char*)pColInfoData->pData) + pColInfoData->info.bytes, sec->pData, pColInfoData->info.bytes);
|
||||
|
||||
if (i == 0 && pColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
cur->win.skey = *(TSKEY*)pColInfoData->pData;
|
||||
cur->win.ekey = *(TSKEY*)(((char*)pColInfoData->pData) + TSDB_KEYSIZE);
|
||||
}
|
||||
}
|
||||
|
||||
cur->rows = 2;
|
||||
return true;
|
||||
}
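tsdbGetExternalRow() above packs a two-row result block: for every column, row 0 carries the value from the cached "prev" block and row 1 the value from the "next" block, so a caller can interpolate across the window boundary from one block. A minimal standalone sketch of the per-column copy (illustrative names only):

#include <stdio.h>
#include <string.h>

/* Copy one boundary pair into a two-row, fixed-width column buffer,
 * as the loop above does for each column. */
static void packBoundaryRows(char *dst, const void *prevVal, const void *nextVal, size_t bytes) {
  memcpy(dst,         prevVal, bytes);   /* row 0 <- value just before the window */
  memcpy(dst + bytes, nextVal, bytes);   /* row 1 <- value just after the window  */
}

int main(void) {
  long long ts[2];
  long long prevTs = 1537146000000LL, nextTs = 1537146060000LL;
  packBoundaryRows((char *)ts, &prevTs, &nextTs, sizeof(long long));
  printf("%lld %lld\n", ts[0], ts[1]);   /* becomes the block's win.skey / win.ekey */
  return 0;
}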
|
||||
|
||||
/*
|
||||
|
@ -2949,10 +3201,10 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
|
|||
goto _error;
|
||||
}
|
||||
|
||||
pGroupInfo->numOfTables = taosArrayGetSize(res);
|
||||
pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(res);
|
||||
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
|
||||
|
||||
tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%" PRIzu "", tsdb, pGroupInfo->numOfTables);
|
||||
tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%u", tsdb, pGroupInfo->numOfTables);
|
||||
taosArrayDestroy(res);
|
||||
|
||||
if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
|
||||
|
@ -2994,10 +3246,10 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
|
|||
} END_TRY
|
||||
|
||||
doQueryTableList(pTable, res, expr);
|
||||
pGroupInfo->numOfTables = taosArrayGetSize(res);
|
||||
pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
|
||||
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
|
||||
|
||||
tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%" PRIzu ", belong to %" PRIzu " groups", tsdb, pTable->tableId.tid,
|
||||
tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%u, belong to %" PRIzu " groups", tsdb, pTable->tableId.tid,
|
||||
pTable->tableId.uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));
|
||||
|
||||
taosArrayDestroy(res);
|
||||
|
@ -3074,7 +3326,7 @@ int32_t tsdbGetTableGroupFromIdList(STsdbRepo* tsdb, SArray* pTableIdList, STabl
|
|||
return terrno;
|
||||
}
|
||||
|
||||
pGroupInfo->numOfTables = taosArrayGetSize(group);
|
||||
pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(group);
|
||||
if (pGroupInfo->numOfTables > 0) {
|
||||
taosArrayPush(pGroupInfo->pGroupList, &group);
|
||||
} else {
|
||||
|
@ -3099,23 +3351,26 @@ static void* doFreeColumnInfoData(SArray* pColumnInfoData) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void* destroyTableCheckInfo(SArray* pTableCheckInfo) {
|
||||
size_t size = taosArrayGetSize(pTableCheckInfo);
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
STableCheckInfo* p = taosArrayGet(pTableCheckInfo, i);
|
||||
destroyTableMemIterator(p);
|
||||
|
||||
tfree(p->pCompInfo);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pTableCheckInfo);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*)queryHandle;
|
||||
if (pQueryHandle == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pQueryHandle->pTableCheckInfo != NULL) {
|
||||
size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
destroyTableMemIterator(pTableCheckInfo);
|
||||
|
||||
tfree(pTableCheckInfo->pCompInfo);
|
||||
}
|
||||
taosArrayDestroy(pQueryHandle->pTableCheckInfo);
|
||||
}
|
||||
|
||||
pQueryHandle->pTableCheckInfo = destroyTableCheckInfo(pQueryHandle->pTableCheckInfo);
|
||||
pQueryHandle->pColumns = doFreeColumnInfoData(pQueryHandle->pColumns);
|
||||
|
||||
taosArrayDestroy(pQueryHandle->defaultLoadColumn);
|
||||
|
|
|
@ -25,7 +25,8 @@ extern "C" {
|
|||
|
||||
#define TARRAY_MIN_SIZE 8
|
||||
#define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
|
||||
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
|
||||
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
|
||||
#define TARRAY_GET_START(array) ((array)->pData)
|
||||
|
||||
typedef struct SArray {
|
||||
size_t size;
|
||||
|
|
|
@ -73,14 +73,14 @@ int main( int argc, char** argv ) {
|
|||
}
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
typedef struct SBufferReader {
|
||||
bool endian;
|
||||
const char* data;
|
||||
size_t pos;
|
||||
size_t size;
|
||||
} SBufferReader;
|
||||
|
||||
typedef struct {
|
||||
typedef struct SBufferWriter {
|
||||
bool endian;
|
||||
char* data;
|
||||
size_t pos;
|
||||
|
|
|
@ -442,4 +442,4 @@ void vnodeWaitReadCompleted(SVnodeObj *pVnode) {
|
|||
vTrace("vgId:%d, queued rmsg num:%d", pVnode->vgId, pVnode->queuedRMsg);
|
||||
taosMsleep(10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1785,7 +1785,7 @@ class TdSuperTable:
|
|||
'top(speed, 50)', # TODO: not supported?
|
||||
'bottom(speed, 50)', # TODO: not supported?
|
||||
'apercentile(speed, 10)', # TODO: TD-1316
|
||||
'last_row(speed)',
|
||||
# 'last_row(speed)', # TODO: commented out per TD-3231, we should re-create
|
||||
# Transformation Functions
|
||||
# 'diff(speed)', # TODO: no supported?!
|
||||
'spread(speed)'
|
||||
|
|
|
@ -183,6 +183,7 @@ python3 ./test.py -f query/isNullTest.py
|
|||
python3 ./test.py -f query/queryWithTaosdKilled.py
|
||||
python3 ./test.py -f query/floatCompare.py
|
||||
python3 ./test.py -f query/queryGroupbySort.py
|
||||
python3 ./test.py -f query/queryBetweenAnd.py
|
||||
|
||||
#stream
|
||||
python3 ./test.py -f stream/metric_1.py
|
||||
|
@ -232,6 +233,7 @@ python3 ./test.py -f functions/function_top.py -r 1
|
|||
python3 ./test.py -f functions/function_twa.py -r 1
|
||||
python3 ./test.py -f functions/function_twa_test2.py
|
||||
python3 ./test.py -f functions/all_null_value.py
|
||||
python3 ./test.py -f functions/function_percentile2.py
|
||||
python3 queryCount.py
|
||||
python3 ./test.py -f query/queryGroupbyWithInterval.py
|
||||
python3 client/twoClients.py
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 1000000
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute("create table test(ts timestamp, col1 int, col2 float)")
|
||||
for i in range(1000):
|
||||
sql = "insert into test values"
|
||||
batchSize = int (self.rowNum / 1000)
|
||||
for j in range (batchSize):
|
||||
currTime = self.ts + batchSize * i + j
|
||||
sql += "(%d, 1, 2.37)" % currTime
|
||||
tdSql.execute(sql)
|
||||
|
||||
tdSql.query("select percentile(col1, 20) from test")
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
tdSql.query("select percentile(col2, 20) from test")
|
||||
tdSql.checkData(0, 0, 2.3699998)
|
||||
|
||||
tdSql.query("select apercentile(col1, 20) from test")
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
tdSql.query("select apercentile(col2, 20) from test")
|
||||
tdSql.checkData(0, 0, 2.3699998)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -26,6 +26,8 @@ class TDTestCase:
|
|||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
self.stb_prefix = 's'
|
||||
self.subtb_prefix = 't'
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
@ -85,6 +87,33 @@ class TDTestCase:
        tdSql.query("select stddev(col6) from test1")
        tdSql.checkData(0, 0, np.std(floatData))

        # add for td-3276
        sql = "create table s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(20), c9 nchar(20), c11 int unsigned, c12 smallint unsigned, c13 tinyint unsigned, c14 bigint unsigned) \
            tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool, t8 binary(20), t9 nchar(20), t10 int unsigned, t11 smallint unsigned, t12 tinyint unsigned, t13 bigint unsigned)"
        tdSql.execute(sql)
        for j in range(2):
            if j % 2 == 0:
                sql = "create table %s using %s tags(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" % \
                    (self.subtb_prefix + str(j) + '_' + str(j), self.stb_prefix)
            else:
                sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" % \
                    (self.subtb_prefix + str(j) + '_' + str(j), self.stb_prefix, j, j / 2.0, j % 41, j % 51, j % 53, j * 1.0, j % 2, 'taos' + str(j), '涛思' + str(j), j % 43, j % 23, j % 17, j % 3167)
            tdSql.execute(sql)
            for i in range(10):
                if i % 5 == 0:
                    ret = tdSql.execute(
                        "insert into %s values (%d, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" %
                        (self.subtb_prefix + str(j) + '_' + str(j), self.ts + i))
                else:
                    ret = tdSql.execute(
                        "insert into %s values (%d, %d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" %
                        (self.subtb_prefix + str(j) + '_' + str(j), self.ts + i, i % 100, i / 2.0, i % 41, i % 51, i % 53, i * 1.0, i % 2, 'taos' + str(i), '涛思' + str(i), i % 43, i % 23, i % 17, i % 3167))

        for i in range(13):
            tdSql.query('select stddev(c4) from s group by t%s' % str(i + 1))

    def stop(self):
        tdSql.close()

@ -79,6 +79,7 @@ python3 test.py -f query/queryFillTest.py

# tools
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/lowaTest.py
#python3 test.py -f tools/taosdemoTest2.py

@ -0,0 +1,206 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import taos
import sys

from util.log import *
from util.sql import *
from util.cases import *


class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        tdSql.execute(
            '''create table if not exists supt
            (ts timestamp, c1 int, c2 float, c3 bigint, c4 double, c5 smallint, c6 tinyint)
            tags(location binary(64), type int, isused bool, family nchar(64))'''
        )
        tdSql.execute("create table t1 using supt tags('beijing', 1, 1, '自行车')")
        tdSql.execute("create table t2 using supt tags('shanghai', 2, 0, '拖拉机')")

        tdLog.printNoPrefix("==========step2:insert data")
        for i in range(10):
            tdSql.execute(
                f"insert into t1 values (now+{i}m, {32767+i}, {20.0+i/10}, {2**31+i}, {3.4*10**38+i/10}, {127+i}, {i})"
            )
            tdSql.execute(
                f"insert into t2 values (now-{i}m, {-32767-i}, {20.0-i/10}, {-i-2**31}, {-i/10-3.4*10**38}, {-127-i}, {-i})"
            )
        tdSql.execute(
            f"insert into t1 values (now+11m, {2**31-1}, {pow(10,37)*34}, {pow(2,63)-1}, {1.7*10**308}, 32767, 127)"
        )
        tdSql.execute(
            f"insert into t2 values (now-11m, {1-2**31}, {-3.4*10**38}, {1-2**63}, {-1.7*10**308}, -32767, -127)"
        )
        tdSql.execute(
            f"insert into t2 values (now-12m, null, {-3.4*10**38}, null, {-1.7*10**308}, null, null)"
        )

        tdLog.printNoPrefix("==========step3:query timestamp type")

        tdSql.query("select * from t1 where ts between now-1m and now+10m")
        tdSql.checkRows(10)
        tdSql.query("select * from t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
        tdSql.checkRows(11)
        tdSql.query("select * from t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
        tdSql.checkRows(0)
        tdSql.query("select * from t1 where ts between -2793600 and 31507199")
        tdSql.checkRows(0)
        tdSql.query("select * from t1 where ts between 1609430400000 and 4765104000000")
        tdSql.checkRows(11)

        tdLog.printNoPrefix("==========step4:query int type")

        tdSql.query("select * from t1 where c1 between 32767 and 32776")
        tdSql.checkRows(10)
        tdSql.query("select * from t1 where c1 between 32766.9 and 32776.1")
        tdSql.checkRows(10)
        tdSql.query("select * from t1 where c1 between 32776 and 32767")
        tdSql.checkRows(0)
        tdSql.error("select * from t1 where c1 between 'a' and 'e'")
        # tdSql.query("select * from t1 where c1 between 0x64 and 0x69")
        # tdSql.checkRows(6)
        tdSql.error("select * from t1 where c1 not between 100 and 106")
        tdSql.query(f"select * from t1 where c1 between {2**31-2} and {2**31+1}")
        tdSql.checkRows(1)
        tdSql.error(f"select * from t2 where c1 between null and {1-2**31}")
        # tdSql.checkRows(3)
        tdSql.query(f"select * from t2 where c1 between {-2**31} and {1-2**31}")
        tdSql.checkRows(1)

        tdLog.printNoPrefix("==========step5:query float type")

        tdSql.query("select * from t1 where c2 between 20.0 and 21.0")
        tdSql.checkRows(10)
        tdSql.query(f"select * from t1 where c2 between {-3.4*10**38-1} and {3.4*10**38+1}")
        tdSql.checkRows(11)
        tdSql.query("select * from t1 where c2 between 21.0 and 20.0")
        tdSql.checkRows(0)
        tdSql.error("select * from t1 where c2 between 'DC3' and 'SYN'")
        tdSql.error("select * from t1 where c2 not between 0.1 and 0.2")
        # tdSql.query(f"select * from t1 where c2 between {pow(10,38)*3.4} and {pow(10,38)*3.4+1}")
        # tdSql.checkRows(1)
        tdSql.query(f"select * from t2 where c2 between {-3.4*10**38-1} and {-3.4*10**38}")
        tdSql.checkRows(0)
        tdSql.error(f"select * from t2 where c2 between null and {-3.4*10**38}")
        # tdSql.checkRows(3)

        tdLog.printNoPrefix("==========step6:query bigint type")

        tdSql.query(f"select * from t1 where c3 between {2**31} and {2**31+10}")
        tdSql.checkRows(10)
        tdSql.error(f"select * from t1 where c3 between {-2**63} and {2**63}")
        # tdSql.checkRows(11)
        tdSql.query(f"select * from t1 where c3 between {2**31+10} and {2**31}")
        tdSql.checkRows(0)
        tdSql.error("select * from t1 where c3 between 'a' and 'z'")
        tdSql.error("select * from t1 where c3 not between 1 and 2")
        tdSql.query(f"select * from t1 where c3 between {2**63-2} and {2**63-1}")
        tdSql.checkRows(1)
        tdSql.error(f"select * from t2 where c3 between {-2**63} and {1-2**63}")
        # tdSql.checkRows(3)
        tdSql.error(f"select * from t2 where c3 between null and {1-2**63}")
        # tdSql.checkRows(2)

        tdLog.printNoPrefix("==========step7:query double type")

        tdSql.query(f"select * from t1 where c4 between {3.4*10**38} and {3.4*10**38+10}")
        tdSql.checkRows(10)
        tdSql.query(f"select * from t1 where c4 between {1.7*10**308+1} and {1.7*10**308+2}")
        # due to limited floating-point precision, values beyond the bigint range cannot be compared exactly
        # tdSql.checkRows(0)
        tdSql.query(f"select * from t1 where c4 between {3.4*10**38+10} and {3.4*10**38}")
        # tdSql.checkRows(0)
        tdSql.error("select * from t1 where c4 between 'a' and 'z'")
        tdSql.error("select * from t1 where c4 not between 1 and 2")
        tdSql.query(f"select * from t1 where c4 between {1.7*10**308} and {1.7*10**308+1}")
        tdSql.checkRows(1)
        tdSql.query(f"select * from t2 where c4 between {-1.7*10**308-1} and {-1.7*10**308}")
        # tdSql.checkRows(3)
        tdSql.error(f"select * from t2 where c4 between null and {-1.7*10**308}")
        # tdSql.checkRows(3)

        tdLog.printNoPrefix("==========step8:query smallint type")

        tdSql.query("select * from t1 where c5 between 127 and 136")
        tdSql.checkRows(10)
        tdSql.query("select * from t1 where c5 between 126.9 and 135.9")
        tdSql.checkRows(9)
        tdSql.query("select * from t1 where c5 between 136 and 127")
        tdSql.checkRows(0)
        tdSql.error("select * from t1 where c5 between '~' and 'ˆ'")
        tdSql.error("select * from t1 where c5 not between 1 and 2")
        tdSql.query("select * from t1 where c5 between 32767 and 32768")
        tdSql.checkRows(1)
        tdSql.query("select * from t2 where c5 between -32768 and -32767")
        tdSql.checkRows(1)
        tdSql.error("select * from t2 where c5 between null and -32767")
        # tdSql.checkRows(1)

        tdLog.printNoPrefix("==========step9:query tinyint type")

        tdSql.query("select * from t1 where c6 between 0 and 9")
        tdSql.checkRows(10)
        tdSql.query("select * from t1 where c6 between -1.1 and 8.9")
        tdSql.checkRows(9)
        tdSql.query("select * from t1 where c6 between 9 and 0")
        tdSql.checkRows(0)
        tdSql.error("select * from t1 where c6 between 'NUL' and 'HT'")
        tdSql.error("select * from t1 where c6 not between 1 and 2")
        tdSql.query("select * from t1 where c6 between 127 and 128")
        tdSql.checkRows(1)
        tdSql.query("select * from t2 where c6 between -128 and -127")
        tdSql.checkRows(1)
        tdSql.error("select * from t2 where c6 between null and -127")
        # tdSql.checkRows(3)

        tdLog.printNoPrefix("==========step10:invalid query type")

        tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
        tdSql.checkRows(23)
        # any non-zero value is parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
        tdSql.query("select * from supt where isused between 0 and 1")
        tdSql.checkRows(23)
        tdSql.query("select * from supt where isused between -1 and 0")
        tdSql.checkRows(0)
        tdSql.error("select * from supt where isused between false and true")
        tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
        tdSql.checkRows(23)

        tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")

        tdSql.error("select * from t1 where c6 between 0x7f and 0x80")  # check filter HEX
        tdSql.error("select * from t1 where c6 between 0b1 and 0b11111")  # check filter BIN
        tdSql.error("select * from t1 where c6 between 0b1 and 0x80")
        tdSql.error("select * from t1 where c6=0b1")
        tdSql.error("select * from t1 where c6=0x1")
        # octal-looking literals are evaluated as decimal numbers
        tdSql.query("select * from t1 where c6 between 01 and 0200")  # check filter OCT
        tdSql.checkRows(10)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -0,0 +1,72 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.numberOfTables = 100
        self.numberOfRecords = 1000

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # stays empty when taosd is not found, so run() can report it
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"
        os.system("%staosdemo -y -t %d -n %d -x" %
                  (binPath, self.numberOfTables, self.numberOfRecords))

        tdSql.query("show databases")
        for i in range(18):
            print(tdSql.getData(0, i))
        tdSql.checkData(0, 2, self.numberOfTables)

        tdSql.execute("use test")
        tdSql.query(
            "select count(*) from test.t%d" % (self.numberOfTables - 1))
        tdSql.checkData(0, 0, self.numberOfRecords)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@ -136,7 +136,8 @@ class TDDnode:
            "monitorDebugFlag":"135",
            "udebugFlag":"135",
            "jnidebugFlag":"135",
            "qdebugFlag":"135"
            "qdebugFlag":"135",
            "maxSQLLength":"1048576"
        }

    def init(self, path):

@ -94,8 +94,9 @@ while $i < 10
  $i = $i + 1
endw

print ==> sleep 8 seconds to renew cache
sleep 8000
print ==> sleep 1 seconds to renew cache
sql reset query cache
sleep 1000

print =============== step5

@ -55,7 +55,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start

print =============== step3
print ==> sleep 8 seconds to renew cache
print ==> sleep 1 seconds to renew cache
sql reset query cache
sleep 1000

@ -39,10 +39,9 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start

print =============== step3
print ==> sleep 8 seconds to renew cache
sleep 2000
print ==> sleep 1 seconds to renew cache
sql reset query cache
sleep 18000
sleep 1000

print =============== step4
sql create database $db

@ -740,11 +740,7 @@ endi
if $data02 != 1 then
  return -1
endi
#numofvgroups
sql show t1.vgroups;
if $rows != 1 then
  return -1
endi

sql show t1.stables;
if $rows != 1 then
  return -1

@ -773,11 +769,8 @@ endi
if $data02 != 2 then
  return -1
endi
#numofvgroups
sql show t1.vgroups;
if $rows != 2 then
  return -1
endi


sql show t1.stables;
if $rows != 1 then
  return -1

@ -806,11 +799,7 @@ endi
if $data02 != 3 then
  return -1
endi
#numofvgroups
sql show t1.vgroups;
if $rows != 3 then
  return -1
endi

sql show t1.stables;
if $rows != 1 then
  return -1

@ -93,6 +93,7 @@ $halfTbNum = $tbNum / 2
      $nchar = 'nchar . $c
      $nchar = $nchar . '

      $ts = $ts + 1
      sql insert into $tb5 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb6 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb7 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb8 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb9 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar )
      $x = $x + 1
    endw

@ -426,7 +426,7 @@ if $data02 != 9.000000020 then
endi

# all possible function in the arithmetic expression, add more
sql select min(c1) * max(c2) /4, sum(c1) * apercentile(c2, 20), apercentile(c4, 33) + 52/9, spread(c5)/min(c2), count(1)/sum(c1), avg(c2)*count(c2) from $stb where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-11-25 19:30:00.000';
sql select min(c1) * max(c2) /4, sum(c1) * apercentile(c2, 20), apercentile(c4, 33) + 52/9, spread(c5)/min(c2), count(1)/sum(c1), avg(c2)*count(c2) from $stb where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-11-25 19:30:01.000';
if $rows != 1 then
  return -1
endi

@ -462,7 +462,7 @@ if $rows != 0 then
endi

# no result return [d.3]
sql select sum(c2) - avg(c2) from $stb where ts > '2018-11-25 19:30:00.000'
sql select sum(c2) - avg(c2) from $stb where ts > '2018-11-25 19:30:01.000'
if $rows != 0 then
  return -1
endi

@ -520,35 +520,35 @@ if $data91 != 9 then
endi

# in group by column
sql select apercentile(c6, 50)-first(c6)+last(c5)*12, last(c5)*12 from ca_stb0 group by c2;
if $rows != 10 then
  return -1
endi

if $data00 != 0.000000000 then
  return -1
endi

if $data01 != 0.000000000 then
  return -1
endi

if $data10 != 12.000000000 then
  return -1
endi

if $data11 != 12.000000000 then
  return -1
endi

if $data20 != 24.000000000 then
  return -1
endi

if $data21 != 24.000000000 then
  return -1
endi

#sql select apercentile(c6, 50)-first(c6)+last(c5)*12, last(c5)*12 from ca_stb0 group by c2;
#if $rows != 10 then
#  return -1
#endi
#
#if $data00 != 0.000000000 then
#  return -1
#endi
#
#if $data01 != 0.000000000 then
#  return -1
#endi
#
#if $data10 != 12.000000000 then
#  return -1
#endi
#
#if $data11 != 12.000000000 then
#  return -1
#endi
#
#if $data20 != 24.000000000 then
#  return -1
#endi
#
#if $data21 != 24.000000000 then
#  return -1
#endi
#
sql_error select first(c6) - last(c6) *12 / count(*) from $stb group by c3;

sql select first(c6) - last(c6) *12 / count(*) from $stb group by c5;

@ -313,6 +313,7 @@ if $rows != 9 then
  return -1
endi
if $data01 != 0 then
  print expect 0, actual:$data01
  return -1
endi
if $data11 != 6 then

@ -979,10 +980,6 @@ if $data00 != @20-01-01 01:01:00.000@ then
  return -1
endi

if $data00 != @20-01-01 01:01:00.000@ then
  return -1
endi
if $data1
if $data01 != 2.000000000 then
  return -1
endi

@ -775,8 +775,24 @@ if $rows != 1 then
  return -1
endi


if $data00 != 0.000000000 then
  return -1
endi

sql select count(tbname) from st1
if $rows != 1 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

sql select count(id) from st1
if $rows != 1 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

@ -610,6 +610,11 @@ print =================>TD-2665
sql_error create table txx as select avg(c) as t from st;
sql_error create table txx1 as select avg(c) as t from t1;

sql select stddev(c),stddev(c) from st group by c;
if $rows != 4 then
  return -1
endi

print =================>TD-2236
sql select first(ts),last(ts) from t1 group by c;
if $rows != 4 then

@ -579,7 +579,7 @@ $tb = $tbPrefix . 0

## interp(*) from stb + group by + fill(none)
$t = $ts0 + 1000
sql select interp(*) from $stb where ts = $t fill(NULL) group by tbname
sql select interp(*) from $stb where ts = $t fill(NULL) group by tbname
if $rows != $tbNum then
  return -1
endi

|
@ -347,6 +347,7 @@ $val = $rowNum + $rowNum
|
|||
print $val
|
||||
print $rows
|
||||
if $rows != $val then
|
||||
print expect $val , actual:$rows
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -0,0 +1,99 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/exec.sh -n dnode1 -s start

sleep 100
sql connect
print ======================== dnode1 start

$db = testdb

sql create database $db
sql use $db

sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10))

sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1");

sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1")
sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2")
sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3")
sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4")
sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4")
sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4")
sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4")
sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4")

sql select f1,last(*) from st2 group by f1;

if $rows != 4 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

if $data02 != 1 then
  print $data02
  return -1
endi
if $data03 != 1.00000 then
  return -1
endi
if $data04 != 1.000000000 then
  return -1
endi
if $data05 != 1 then
  return -1
endi
if $data06 != 1 then
  return -1
endi
if $data07 != 1 then
  return -1
endi
if $data08 != 1 then
  return -1
endi
if $data09 != 1 then
  return -1
endi

sql select f1,last(f1,st2.*) from st2 group by f1;
if $rows != 4 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

if $data01 != 1 then
  return -1
endi
if $data03 != 1 then
  return -1
endi
if $data04 != 1.00000 then
  return -1
endi
if $data05 != 1.000000000 then
  return -1
endi
if $data06 != 1 then
  return -1
endi
if $data07 != 1 then
  return -1
endi
if $data08 != 1 then
  return -1
endi
if $data09 != 1 then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@ -16,6 +16,9 @@ $stb = $stbPrefix . $i

sql use $db

print ========>TD-3231 last_row with group by column error
sql_error select last_row(c1) from $stb group by c1;

##### select lastrow from STable with two vnodes, timestamp decreases from tables in vnode0 to tables in vnode1
sql select last_row(*) from $stb
if $rows != 1 then

@ -224,4 +227,4 @@ sql create table tu(ts timestamp, k int)
sql select last_row(*) from tu
if $row != 0 then
  return -1
endi
endi

@ -538,6 +538,7 @@ $offset = $offset + 1
sql select max(c1), min(c2), avg(c3), count(c4), sum(c5), spread(c6), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 8 interval(5m) limit $offset offset $offset
$val = $rowNum - $offset
if $rows != $val then
  print expect $val, actual:$rows
  return -1
endi
if $data00 != @18-10-22 02:30:00.000@ then

@ -107,3 +107,6 @@ sleep 100
run general/parser/function.sim
sleep 100
run general/parser/stableOp.sim
sleep 100
run general/parser/slimit_alter_tags.sim


@ -73,6 +73,60 @@ if $row != 100 then
  return -1
endi

sql select bottom(c3, 5) from tb_tb1 interval(1y);
if $rows != 5 then
  return -1
endi

if $data01 != 0.00000 then
  print expect 0.00000, actual:$data01
  return -1
endi

if $data11 != 0.00000 then
  return -1
endi

if $data21 != 0.00000 then
  return -1
endi

if $data31 != 0.00000 then
  return -1
endi

sql select top(c4, 5) from tb_tb1 interval(1y);
if $rows != 5 then
  return -1
endi

if $data01 != 9.000000000 then
  print expect 9.000000000, actual:$data01
  return -1
endi

if $data11 != 9.000000000 then
  return -1
endi

if $data21 != 9.000000000 then
  return -1
endi

if $data31 != 9.000000000 then
  return -1
endi

sql select top(c3, 5) from tb_tb1 interval(40h)
if $rows != 25 then
  return -1
endi

if $data01 != 9.00000 then
  print expect 9.00000, actual:$data01
  return -1
endi

sql select last(*) from tb_tb9
if $row != 1 then
  return -1

@ -100,8 +100,9 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start

print =============== step5
print ==> sleep 8 seconds to renew cache
sleep 8000
print ==> renew cache
sql reset query cache
sleep 1000


print =============== step6

@ -22,8 +22,6 @@
./test.sh -f general/http/grafana_bug.sim
./test.sh -f general/http/grafana.sim



./test.sh -f general/insert/basic.sim
./test.sh -f general/insert/insert_drop.sim
./test.sh -f general/insert/query_block1_memory.sim