Merge remote-tracking branch 'origin/develop' into feature/crash_gen
commit d58434c366

.travis.yml
@@ -36,6 +36,8 @@ matrix:
- psmisc

before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug

@@ -150,6 +152,8 @@ matrix:
- DESC="trusty/gcc-4.8 build"

before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug

@@ -173,6 +177,8 @@ matrix:
- cmake

before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug

@@ -197,6 +203,8 @@ matrix:
- cmake

before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug

@@ -225,6 +233,8 @@ matrix:
- DESC="trusty/gcc-4.8 build"

before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug

@@ -84,7 +84,7 @@ typedef struct SRetrieveSupport {
SColumnModel * pFinalColModel; // colModel for final result
SSubqueryState * pState;
int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSqlObj;
SSqlObj * pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times
pthread_mutex_t queryMutex;

@@ -36,6 +36,8 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql);

int32_t tscHandleMultivnodeInsert(SSqlObj *pSql);

int32_t tscHandleInsertRetry(SSqlObj* pSql);

void tscBuildResFromSubqueries(SSqlObj *pSql);
void **doSetResultRowData(SSqlObj *pSql, bool finalResult);

@@ -213,8 +213,7 @@ typedef struct SQueryInfo {
typedef struct {
int command;
uint8_t msgType;
bool autoCreated; // if the table is missing, on-the-fly create it. during getmeterMeta
int8_t dataSourceType; // load data from file or not
bool autoCreated; // create table if it is not existed during retrieve table meta in mnode

union {
int32_t count;

@@ -222,18 +221,23 @@ typedef struct {
};

int32_t insertType;
int32_t clauseIndex; // index of multiple subclause query
int32_t clauseIndex; // index of multiple subclause query

char * curSql; // current sql, resume position of sql after parsing paused
int8_t parseFinished;

short numOfCols;
uint32_t allocSize;
char * payload;
int32_t payloadLen;
SQueryInfo **pQueryInfo;
int32_t numOfClause;
char * curSql; // current sql, resume position of sql after parsing paused
void * pTableList; // referred table involved in sql
int32_t batchSize; // for parameter ('?') binding and batch processing
int32_t numOfParams;

int8_t dataSourceType; // load data from file or not
int8_t submitSchema; // submit block is built with table schema
SHashObj *pTableList; // referred table involved in sql
SArray *pDataBlocks; // SArray<STableDataBlocks*> submit data blocks after parsing sql
} SSqlCmd;

@@ -431,6 +431,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
return;
}

tscDebug("%p get tableMeta successfully", pSql);

if (pSql->pStream == NULL) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);

@@ -446,20 +448,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(code == TSDB_CODE_SUCCESS);
}

assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pTableMetaInfo->vgroupIndex >= 0 && pSql->param != NULL);
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL);

SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSqlObj;
SSqlObj * pParObj = trs->pParentSql;

assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0);

// NOTE: the vgroupInfo for the queried super table must be existed here.
assert(pTableMetaInfo->vgroupList != NULL);
assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL);

if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
return;
}

goto _error;
} else { // continue to process normal async query
if (pCmd->parseFinished) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding query", pSql);

@@ -472,18 +474,41 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(code == TSDB_CODE_SUCCESS);
}

// if failed to process sql, go to error handler
if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
return;
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been update, the submit block needs to be updated
// 2. vnode may need the schema information along with submit block to update its local table schema.
if (pCmd->command == TSDB_SQL_INSERT) {
tscDebug("%p redo parse sql string to build submit block", pSql);

pCmd->parseFinished = false;
if ((code = tsParseSql(pSql, true)) == TSDB_CODE_SUCCESS) {
/*
* Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks,
* and send the required submit block according to index value in supporter to server.
*/
pSql->fp = pSql->fetchFp; // restore the fp
if ((code = tscHandleInsertRetry(pSql)) == TSDB_CODE_SUCCESS) {
return;
}
}

} else {// in case of other query type, continue
if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
return;
}
}
// // todo update the submit message according to the new table meta
// // 1. table uid, 2. ip address
// code = tscSendMsgToServer(pSql);
// if (code == TSDB_CODE_SUCCESS) return;

goto _error;
} else {
tscDebug("%p continue parse sql after get table meta", pSql);

code = tsParseSql(pSql, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}

if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STMT_INSERT)) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);

@@ -492,45 +517,49 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
} else {
assert(code == TSDB_CODE_SUCCESS);
}

(*pSql->fp)(pSql->param, pSql, code);
return;
}

if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;

// proceed to invoke the tscDoQuery();
}
}

} else { // stream computing
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
pRes->code = code;

if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}

if (code == TSDB_CODE_SUCCESS && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex);
pRes->code = code;

if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
}
}

if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscQueueAsyncRes(pSql);
return;
}

if (pSql->pStream) {
tscDebug("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command);
if (!pSql->cmd.parseFinished) {
tsParseSql(pSql, false);
sem_post(&pSql->rspSem);
}

return;
} else {
tscDebug("%p get tableMeta successfully", pSql);
}

tscDoQuery(pSql);
return;

_error:
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscQueueAsyncRes(pSql);
}
}

@@ -1259,8 +1259,6 @@ int tsParseInsertSql(SSqlObj *pSql) {
if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) {
goto _error;
}
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}

code = TSDB_CODE_SUCCESS;

@@ -347,8 +347,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
int doProcessSql(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
int32_t code = TSDB_CODE_SUCCESS;

if (pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_RETRIEVE ||

@@ -365,10 +364,13 @@ int doProcessSql(SSqlObj *pSql) {
return pRes->code;
}

code = tscSendMsgToServer(pSql);
int32_t code = tscSendMsgToServer(pSql);

// NOTE: if code is TSDB_CODE_SUCCESS, pSql may have been released here already by other threads.
if (code != TSDB_CODE_SUCCESS) {
pRes->code = code;
tscQueueAsyncRes(pSql);
return pRes->code;
}

return TSDB_CODE_SUCCESS;

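Illustrative note (not part of the commit): the NOTE added above to doProcessSql() captures an ownership rule for asynchronous sends. The following standalone C sketch, with made-up names, shows the same pattern: once the message is handed to the async layer, only the failure path may keep touching the request object, because the response callback on another thread may already have freed it.

/* Minimal sketch of "do not touch the request after a successful async send". */
#include <stdio.h>

typedef struct { int code; } Request;

static int async_send(Request *req) {
  (void)req;        /* pretend the RPC layer now owns req */
  return 0;         /* 0 == success, analogous to TSDB_CODE_SUCCESS */
}

static int process(Request *req) {
  int code = async_send(req);
  if (code != 0) {  /* failure: we still own req and may record the error */
    req->code = code;
    return code;
  }
  /* success: req may already be released by the callback thread - do not dereference it */
  return 0;
}

int main(void) {
  Request r = {0};
  printf("%d\n", process(&r));
  return 0;
}
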
@@ -25,6 +25,7 @@
typedef struct SInsertSupporter {
SSubqueryState* pState;
SSqlObj* pSql;
int32_t index;
} SInsertSupporter;

static void freeJoinSubqueryObj(SSqlObj* pSql);

@@ -1414,7 +1415,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
}

trs->subqueryIndex = i;
trs->pParentSqlObj = pSql;
trs->pParentSql = pSql;
trs->pFinalColModel = pModel;

pthread_mutexattr_t mutexattr;

@@ -1499,7 +1500,7 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code));
#endif

SSqlObj* pParentSql = trsupport->pParentSqlObj;
SSqlObj* pParentSql = trsupport->pParentSql;

pParentSql->res.code = code;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;

@@ -1508,8 +1509,45 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
tscHandleSubqueryError(trsupport, tres, pParentSql->res.code);
}

/*
* current query failed, and the retry count is less than the available
* count, retry query clear previous retrieved data, then launch a new sub query
*/
static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, int32_t code) {
SSqlObj *pParentSql = trsupport->pParentSql;
int32_t subqueryIndex = trsupport->subqueryIndex;

STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];

tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]);

// clear local saved number of results
trsupport->localBuffer->num = 0;
pthread_mutex_unlock(&trsupport->queryMutex);

tscTrace("%p sub:%p retrieve failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
tstrerror(code), subqueryIndex, trsupport->numOfRetry);

SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSql, trsupport, pSql);

// todo add to async res or not??
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vgId:%d, orderOfSub:%d",
trsupport->pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex);

pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;

return pParentSql->res.code;
}

taos_free_result(pSql);
return tscProcessSql(pNew);
}

void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) {
SSqlObj *pParentSql = trsupport->pParentSqlObj;
SSqlObj *pParentSql = trsupport->pParentSql;
int32_t subqueryIndex = trsupport->subqueryIndex;

assert(pSql != NULL);

@@ -1528,38 +1566,16 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
tscDebug("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", pParentSql, pSql,
subqueryIndex, pParentSql->res.code);
}

if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
tscDebug("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pParentSql, pSql, numOfRows, subqueryIndex);
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pParentSql, pSql,
subqueryIndex, pParentSql->res.code);
} else {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
/*
* current query failed, and the retry count is less than the available
* count, retry query clear previous retrieved data, then launch a new sub query
*/
tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]);

// clear local saved number of results
trsupport->localBuffer->num = 0;
pthread_mutex_unlock(&trsupport->queryMutex);

tscDebug("%p sub:%p retrieve failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSqlObj, pSql,
tstrerror(numOfRows), subqueryIndex, trsupport->numOfRetry);

SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql);
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery sqlObj due to out of memory, abort retry",
trsupport->pParentSqlObj, pSql);

pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
return;
}

tscProcessSql(pNew);
return;
} else { // reach the maximum retry count, abort
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql, pSql,

@@ -1600,7 +1616,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO

static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* pSql) {
int32_t idx = trsupport->subqueryIndex;
SSqlObj * pPObj = trsupport->pParentSqlObj;
SSqlObj * pParentSql = trsupport->pParentSql;
tOrderDescriptor *pDesc = trsupport->pOrderDescriptor;

SSubqueryState* pState = trsupport->pState;

@@ -1610,7 +1626,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p

// data in from current vnode is stored in cache and disk
uint32_t numOfRowsFromSubquery = trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num;
tscDebug("%p sub:%p all data retrieved from ip:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pPObj, pSql,
tscDebug("%p sub:%p all data retrieved from ip:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql, pSql,
pTableMetaInfo->vgroupList->vgroups[0].ipAddr[0].fqdn, pTableMetaInfo->vgroupList->vgroups[0].vgId,
numOfRowsFromSubquery, idx);

@@ -1624,15 +1640,14 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
trsupport->localBuffer->num, colInfo);
#endif

if (tsTotalTmpDirGB != 0 && tsAvailTmpDirGB < tsMinimalTmpDirGB) {
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pPObj, pSql,
tsAvailTmpDirGB, tsMinimalTmpDirGB);
tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE);
return;
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql,
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
return tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE);
}

// each result for a vnode is ordered as an independant list,
// then used as an input of loser tree for disk-based merge routine
// then used as an input of loser tree for disk-based merge
int32_t code = tscFlushTmpBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pQueryInfo->groupbyExpr.orderType);
if (code != 0) { // set no disk space error info, and abort retry
return tscAbortFurtherRetryRetrieval(trsupport, pSql, code);

@@ -1640,7 +1655,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p

int32_t remain = -1;
if ((remain = atomic_sub_fetch_32(&pState->numOfRemain, 1)) > 0) {
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex,
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
pState->numOfTotal - remain);

return tscFreeSubSqlObj(trsupport, pSql);

@@ -1649,29 +1664,29 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
// all sub-queries are returned, start to local merge process
pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;

tscDebug("%p retrieve from %d vnodes completed.final NumOfRows:%" PRId64 ",start to build loser tree", pPObj,
tscDebug("%p retrieve from %d vnodes completed.final NumOfRows:%" PRId64 ",start to build loser tree", pParentSql,
pState->numOfTotal, pState->numOfRetrievedRows);

SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0);
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
tscClearInterpInfo(pPQueryInfo);

tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel, pPObj);
tscDebug("%p build loser tree completed", pPObj);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel, pParentSql);
tscDebug("%p build loser tree completed", pParentSql);

pPObj->res.precision = pSql->res.precision;
pPObj->res.numOfRows = 0;
pPObj->res.row = 0;
pParentSql->res.precision = pSql->res.precision;
pParentSql->res.numOfRows = 0;
pParentSql->res.row = 0;

// only free once
tfree(trsupport->pState);
tscFreeSubSqlObj(trsupport, pSql);

// set the command flag must be after the semaphore been correctly set.
pPObj->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE;
if (pPObj->res.code == TSDB_CODE_SUCCESS) {
(*pPObj->fp)(pPObj->param, pPObj, 0);
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE;
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
tscQueueAsyncRes(pPObj);
tscQueueAsyncRes(pParentSql);
}
}

@@ -1679,22 +1694,48 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
SRetrieveSupport *trsupport = (SRetrieveSupport *)param;
tOrderDescriptor *pDesc = trsupport->pOrderDescriptor;
int32_t idx = trsupport->subqueryIndex;
SSqlObj * pPObj = trsupport->pParentSqlObj;
SSqlObj * pParentSql = trsupport->pParentSql;

SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL) { // sql object has been released in error process, return immediately
tscDebug("%p subquery has been released, idx:%d, abort", pPObj, idx);
tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
return;
}

SSubqueryState* pState = trsupport->pState;
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pPObj->numOfSubs == pState->numOfTotal);
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);

// query process and cancel query process may execute at the same time
pthread_mutex_lock(&trsupport->queryMutex);

if (numOfRows < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) {
return tscHandleSubqueryError(trsupport, pSql, numOfRows);

STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];

if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
tscTrace("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(numOfRows), tstrerror(pParentSql->res.code));

tscHandleSubqueryError(param, tres, numOfRows);
return;
}

if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(numOfRows == taos_errno(pSql));

if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);

if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
return;
}
} else {
tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(numOfRows));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows); // set global code and abort
}

tscHandleSubqueryError(param, tres, numOfRows);
return;
}

SSqlRes * pRes = &pSql->res;

@@ -1704,14 +1745,13 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(pRes->numOfRows == numOfRows);
int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);

tscDebug("%p sub:%p retrieve numOfRows:%" PRId64 " totalNumOfRows:%" PRIu64 " from ip:%s, orderOfSub:%d", pPObj, pSql,
tscDebug("%p sub:%p retrieve numOfRows:%" PRId64 " totalNumOfRows:%" PRIu64 " from ip:%s, orderOfSub:%d", pParentSql, pSql,
pRes->numOfRows, pState->numOfRetrievedRows, pSql->ipList.fqdn[pSql->ipList.inUse], idx);

if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pPObj, pSql, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
return;
pParentSql, pSql, tsMaxNumOfOrderedResults, num);
return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
}

#ifdef _DEBUG_VIEW

@@ -1722,11 +1762,11 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
tColModelDisplayEx(pDesc->pColumnModel, pRes->data, pRes->numOfRows, pRes->numOfRows, colInfo);
#endif

if (tsTotalTmpDirGB != 0 && tsAvailTmpDirGB < tsMinimalTmpDirGB) {
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pPObj, pSql,
tsAvailTmpDirGB, tsMinimalTmpDirGB);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
return;
// no disk space for tmp directory
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql,
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
}

int32_t ret = saveToBuffer(trsupport->pExtMemBuffer[idx], pDesc, trsupport->localBuffer, pRes->data,

@@ -1771,80 +1811,56 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu
void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
SRetrieveSupport *trsupport = (SRetrieveSupport *) param;

SSqlObj* pParentSql = trsupport->pParentSqlObj;
SSqlObj* pParentSql = trsupport->pParentSql;
SSqlObj* pSql = (SSqlObj *) tres;

SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
assert(pSql->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1);

STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];

SSubqueryState* pState = trsupport->pState;
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);
SCMVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex];

// todo set error code
// stable query killed or other subquery failed, all query stopped
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {

// stable query is killed, abort further retry
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;

if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
code = pParentSql->res.code;
}

tscDebug("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%s", pParentSql, pSql,
trsupport->subqueryIndex, tstrerror(code));
tscTrace("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s",
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));

tscHandleSubqueryError(param, tres, code);
return;
}

/*
* if a query on a vnode is failed, all retrieve operations from vnode that occurs later
* if a subquery on a vnode failed, all retrieve operations from vnode that occurs later
* than this one are actually not necessary, we simply call the tscRetrieveFromDnodeCallBack
* function to abort current and remain retrieve process.
*
* NOTE: thread safe is required.
*/
if (code != TSDB_CODE_SUCCESS) {
if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) {
tscDebug("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code);
} else { // does not reach the maximum retry time, go on
tscDebug("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);

SSqlObj *pNew = tscCreateSqlObjForSubquery(pParentSql, trsupport, pSql);
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(code == taos_errno(pSql));

if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vgId:%d, orderOfSub:%d",
trsupport->pParentSqlObj, pSql, pVgroup->vgId, trsupport->subqueryIndex);

pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
} else {
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
assert(pNewQueryInfo->pTableMetaInfo[0]->pTableMeta != NULL);

taos_free_result(pSql);
tscProcessSql(pNew);
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) {
return;
}
}

if (pParentSql->res.code != TSDB_CODE_SUCCESS) { // at least one peer subquery failed, abort current query
tscDebug("%p sub:%p query failed,ip:%s,vgId:%d,orderOfSub:%d,global code:%d", pParentSql, pSql,
pVgroup->ipAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex, pParentSql->res.code);

tscHandleSubqueryError(param, tres, pParentSql->res.code);
} else { // success, proceed to retrieve data from dnode
tscDebug("%p sub:%p query complete, ip:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSqlObj, pSql,
pVgroup->ipAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);

if (pSql->res.qhandle == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);
} else {
taos_fetch_rows_a(tres, tscRetrieveFromDnodeCallBack, param);
tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
}

tscHandleSubqueryError(param, tres, pParentSql->res.code);
return;
}

tscTrace("%p sub:%p query complete, ip:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->ipAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);

if (pSql->res.qhandle == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);
} else {
taos_fetch_rows_a(tres, tscRetrieveFromDnodeCallBack, param);
}
}

@@ -1876,13 +1892,36 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)

// release data block data
tfree(pState);
// pParentCmd->pDataBlocks = tscDestroyBlockArrayList(pParentCmd->pDataBlocks);

// restore user defined fp
pParentObj->fp = pParentObj->fetchFp;

// todo remove this parameter in async callback function definition.
// all data has been sent to vnode, call user function
(*pParentObj->fp)(pParentObj->param, pParentObj, numOfRows);
int32_t v = (pParentObj->res.code != TSDB_CODE_SUCCESS)? pParentObj->res.code:pParentObj->res.numOfRows;
(*pParentObj->fp)(pParentObj->param, pParentObj, v);
}

/**
* it is a subquery, so after parse the sql string, copy the submit block to payload of itself
* @param pSql
* @return
*/
int32_t tscHandleInsertRetry(SSqlObj* pSql) {
assert(pSql != NULL && pSql->param != NULL);
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;

SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param;
assert(pSupporter->index < pSupporter->pState->numOfTotal);

STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, pSupporter->index);
pRes->code = tscCopyDataBlockToPayload(pSql, pTableDataBlock);
if (pRes->code != TSDB_CODE_SUCCESS) {
return pRes->code;
}

return tscProcessSql(pSql);
}

int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {

@@ -1906,10 +1945,11 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {

while(numOfSub < pSql->numOfSubs) {
SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter));
pSupporter->pSql = pSql;
pSupporter->pSql = pSql;
pSupporter->pState = pState;

SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);//createSubqueryObj(pSql, 0, multiVnodeInsertFinalize, pSupporter1, TSDB_SQL_INSERT, NULL);
pSupporter->index = numOfSub;

SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
if (pNew == NULL) {
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, numOfSub, strerror(errno));
goto _error;

@@ -1940,6 +1980,8 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
return pRes->code; // free all allocated resource
}

pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);

// use the local variable
for (int32_t j = 0; j < numOfSub; ++j) {
SSqlObj *pSub = pSql->pSubs[j];

@@ -1947,7 +1989,6 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
tscProcessSql(pSub);
}

pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
return TSDB_CODE_SUCCESS;

_error:

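Illustrative note (not part of the commit): the retry paths above record the first failing subquery's error with atomic_val_compare_exchange_32 so that concurrent callbacks cannot overwrite each other. The standalone C11 sketch below reproduces that "record only the first error" idea with standard atomics; the names and the use of <stdatomic.h> are assumptions, not the project's own helper.

/* Record the first error once across concurrent callbacks. */
#include <stdatomic.h>
#include <stdio.h>

#define CODE_SUCCESS 0

static _Atomic int globalCode = CODE_SUCCESS;

/* set the shared code only while it is still "success"; later failures are ignored */
static void recordFirstError(int code) {
  int expected = CODE_SUCCESS;
  atomic_compare_exchange_strong(&globalCode, &expected, code);
}

int main(void) {
  recordFirstError(5);   /* first failing subquery wins */
  recordFirstError(7);   /* ignored, an error is already recorded */
  printf("global code: %d\n", atomic_load(&globalCode));
  return 0;
}
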
@@ -562,10 +562,8 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t
return TSDB_CODE_SUCCESS;
}

static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock) {
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bool includeSchema) {
// TODO: optimize this function, handle the case while binary is not presented
int len = 0;

STableMeta* pTableMeta = pTableDataBlock->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);

@@ -575,16 +573,37 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock) {
pDataBlock += sizeof(SSubmitBlk);

int32_t flen = 0; // original total length of row
for (int32_t i = 0; i < tinfo.numOfColumns; ++i) {
flen += TYPE_BYTES[pSchema[i].type];

// schema needs to be included into the submit data block
if (includeSchema) {
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
for(int32_t j = 0; j < numOfCols; ++j) {
STColumn* pCol = (STColumn*) pDataBlock;
pCol->colId = pSchema[j].colId;
pCol->type = pSchema[j].type;
pCol->bytes = pSchema[j].bytes;
pCol->offset = 0;

pDataBlock += sizeof(STColumn);
flen += TYPE_BYTES[pSchema[j].type];
}

int32_t schemaSize = sizeof(STColumn) * numOfCols;
pBlock->schemaLen = schemaSize;
} else {
for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
flen += TYPE_BYTES[pSchema[j].type];
}

pBlock->schemaLen = 0;
}

char* p = pTableDataBlock->pData + sizeof(SSubmitBlk);
pBlock->len = 0;
pBlock->dataLen = 0;
int32_t numOfRows = htons(pBlock->numOfRows);

for (int32_t i = 0; i < numOfRows; ++i) {
SDataRow trow = (SDataRow)pDataBlock;
SDataRow trow = (SDataRow) pDataBlock;
dataRowSetLen(trow, TD_DATA_ROW_HEAD_SIZE + flen);
dataRowSetVersion(trow, pTableMeta->sversion);

@@ -595,20 +614,21 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock) {
p += pSchema[j].bytes;
}

// p += pTableDataBlock->rowSize;
pDataBlock += dataRowLen(trow);
pBlock->len += dataRowLen(trow);
pBlock->dataLen += dataRowLen(trow);
}

len = pBlock->len;
pBlock->len = htonl(pBlock->len);
int32_t len = pBlock->dataLen + pBlock->schemaLen;
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);

return len;
}

int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
SSqlCmd* pCmd = &pSql->cmd;

// the expanded size when a row data is converted to SDataRow format
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
const int32_t MAX_EXPAND_SIZE = TD_DATA_ROW_HEAD_SIZE + TYPE_BYTES[TSDB_DATA_TYPE_BINARY];

void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);

@@ -617,7 +637,6 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
size_t total = taosArrayGetSize(pTableDataBlockList);
for (int32_t i = 0; i < total; ++i) {
STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, i);

STableDataBlocks* dataBuf = NULL;

int32_t ret =

@@ -666,16 +685,17 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
pBlocks->uid = htobe64(pBlocks->uid);
pBlocks->sversion = htonl(pBlocks->sversion);
pBlocks->numOfRows = htons(pBlocks->numOfRows);
pBlocks->schemaLen = 0;

// erase the empty space reserved for binary data
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock);
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
assert(finalLen <= len);

dataBuf->size += (finalLen + sizeof(SSubmitBlk));
assert(dataBuf->size <= dataBuf->nAllocSize);

// the length does not include the SSubmitBlk structure
pBlocks->len = htonl(finalLen);
pBlocks->dataLen = htonl(finalLen);

dataBuf->numOfTables += 1;
}

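Illustrative note (not part of the commit): trimDataBlock() above now writes an optional column-schema section ahead of the row data and returns schemaLen + dataLen instead of a single len. The C sketch below only mirrors that length bookkeeping with simplified, hypothetical types; it is not the actual SSubmitBlk layout code.

/* Simplified view of the new submit-block length accounting. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t schemaLen;  /* 0 when no schema section is attached */
  int32_t dataLen;    /* row data only, the block header is excluded */
} BlockHead;

static int32_t payloadLen(const BlockHead *h) {
  /* matches the diff: len = pBlock->dataLen + pBlock->schemaLen */
  return h->schemaLen + h->dataLen;
}

int main(void) {
  BlockHead withSchema = { .schemaLen = 3 * 8, .dataLen = 256 };
  BlockHead plain      = { .schemaLen = 0,     .dataLen = 256 };
  printf("%d %d\n", payloadLen(&withSchema), payloadLen(&plain));
  return 0;
}
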
@@ -128,10 +128,10 @@ extern float tsTotalLogDirGB;
extern float tsTotalTmpDirGB;
extern float tsTotalDataDirGB;
extern float tsAvailLogDirGB;
extern float tsAvailTmpDirGB;
extern float tsAvailTmpDirectorySpace;
extern float tsAvailDataDirGB;
extern float tsMinimalLogDirGB;
extern float tsMinimalTmpDirGB;
extern float tsReservedTmpDirectorySpace;
extern float tsMinimalDataDirGB;
extern int32_t tsTotalMemoryMB;
extern int32_t tsVersion;

@@ -259,7 +259,7 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
for (int i = 0; i < nEle; i++) {
if (!isNull(varDataVal(tdGetColDataOfRow(pCol, i)), pCol->type)) return false;
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
default:

@@ -43,7 +43,7 @@ int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
int32_t tsNumOfMnodes = 3;

// common
int32_t tsRpcTimer = 300;
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsMaxShellConns = 5000;
int32_t tsMaxConnections = 5000;

@@ -170,9 +170,9 @@ int64_t tsStreamMax;
int32_t tsNumOfCores = 1;
float tsTotalTmpDirGB = 0;
float tsTotalDataDirGB = 0;
float tsAvailTmpDirGB = 0;
float tsAvailTmpDirectorySpace = 0;
float tsAvailDataDirGB = 0;
float tsMinimalTmpDirGB = 0.1;
float tsReservedTmpDirectorySpace = 0.1;
float tsMinimalDataDirGB = 0.5;
int32_t tsTotalMemoryMB = 0;
int32_t tsVersion = 0;

@@ -807,7 +807,7 @@ static void doInitGlobalConfig() {
taosInitConfigOption(cfg);

cfg.option = "minimalTmpDirGB";
cfg.ptr = &tsMinimalTmpDirGB;
cfg.ptr = &tsReservedTmpDirectorySpace;
cfg.valType = TAOS_CFG_VTYPE_FLOAT;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0.001;

@@ -283,7 +283,8 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
}
tdAppendColVal(trow, val, c->type, c->bytes, c->offset);
}
pBlk->len = htonl(dataRowLen(trow));
pBlk->dataLen = htonl(dataRowLen(trow));
pBlk->schemaLen = 0;

pBlk->uid = htobe64(pObj->uid);
pBlk->tid = htonl(pObj->tid);

@@ -39,6 +39,15 @@

#define MPEER_CONTENT_LEN 2000

typedef struct {
pthread_t thread;
int32_t threadIndex;
int32_t failed;
int32_t opened;
int32_t vnodeNum;
int32_t * vnodeList;
} SOpenVnodeThread;

void * tsDnodeTmr = NULL;
static void * tsStatusTimer = NULL;
static uint32_t tsRebootTime;

@@ -242,28 +251,86 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
return TSDB_CODE_SUCCESS;
}

static int32_t dnodeOpenVnodes() {
static void *dnodeOpenVnode(void *param) {
SOpenVnodeThread *pThread = param;
char vnodeDir[TSDB_FILENAME_LEN * 3];
int32_t failed = 0;
int32_t *vnodeList = (int32_t *)malloc(sizeof(int32_t) * TSDB_MAX_VNODES);
int32_t numOfVnodes;
int32_t status;

status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum);

for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
int32_t vgId = pThread->vnodeList[v];
snprintf(vnodeDir, TSDB_FILENAME_LEN * 3, "%s/vnode%d", tsVnodeDir, vgId);
if (vnodeOpen(vgId, vnodeDir) < 0) {
dError("vgId:%d, failed to open vnode by thread:%d", vgId, pThread->threadIndex);
pThread->failed++;
} else {
dDebug("vgId:%d, is openned by thread:%d", vgId, pThread->threadIndex);
pThread->opened++;
}
}

dDebug("thread:%d, total vnodes:%d, openned:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
pThread->failed);
return NULL;
}

static int32_t dnodeOpenVnodes() {
int32_t *vnodeList = calloc(TSDB_MAX_VNODES, sizeof(int32_t));
int32_t numOfVnodes;
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);

if (status != TSDB_CODE_SUCCESS) {
dInfo("Get dnode list failed");
dInfo("get dnode list failed");
free(vnodeList);
return status;
}

for (int32_t i = 0; i < numOfVnodes; ++i) {
snprintf(vnodeDir, TSDB_FILENAME_LEN * 3, "%s/vnode%d", tsVnodeDir, vnodeList[i]);
if (vnodeOpen(vnodeList[i], vnodeDir) < 0) failed++;
int32_t threadNum = tsNumOfCores;
int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread));
for (int32_t t = 0; t < threadNum; ++t) {
threads[t].threadIndex = t;
threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t));
}

for (int32_t v = 0; v < numOfVnodes; ++v) {
int32_t t = v % threadNum;
SOpenVnodeThread *pThread = &threads[t];
pThread->vnodeList[pThread->vnodeNum++] = vnodeList[v];
}

dDebug("start %d threads to open %d vnodes", threadNum, numOfVnodes);

for (int32_t t = 0; t < threadNum; ++t) {
SOpenVnodeThread *pThread = &threads[t];
if (pThread->vnodeNum == 0) continue;

pthread_attr_t thAttr;
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
if (pthread_create(&pThread->thread, &thAttr, dnodeOpenVnode, pThread) != 0) {
dError("thread:%d, failed to create thread to open vnode, reason:%s", pThread->threadIndex, strerror(errno));
}

pthread_attr_destroy(&thAttr);
}

int32_t openVnodes = 0;
int32_t failedVnodes = 0;
for (int32_t t = 0; t < threadNum; ++t) {
SOpenVnodeThread *pThread = &threads[t];
if (pThread->vnodeNum > 0 && pThread->thread) {
pthread_join(pThread->thread, NULL);
}
openVnodes += pThread->opened;
failedVnodes += pThread->failed;
free(pThread->vnodeList);
}

free(vnodeList);
dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, numOfVnodes-failed, failed);
free(threads);
dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes);

return TSDB_CODE_SUCCESS;
}

@@ -273,7 +340,7 @@ void dnodeStartStream() {
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);

if (status != TSDB_CODE_SUCCESS) {
dInfo("Get dnode list failed");
dInfo("get dnode list failed");
return;
}

@@ -292,7 +359,7 @@ static void dnodeCloseVnodes() {
status = dnodeGetVnodeList(vnodeList, &numOfVnodes);

if (status != TSDB_CODE_SUCCESS) {
dInfo("Get dnode list failed");
dInfo("get dnode list failed");
free(vnodeList);
return;
}

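Illustrative note (not part of the commit): dnodeOpenVnodes() above now assigns vnode v to thread v % threadNum and joins all threads before summing the opened/failed counters. The standalone sketch below shows only that distribution-and-join pattern with plain integers and invented names; it is not the dnode code itself.

/* Round-robin work distribution across a small joinable thread pool. */
#include <pthread.h>
#include <stdio.h>

#define MAX_ITEMS 16

typedef struct {
  int items[MAX_ITEMS];
  int num;
  int done;
} Worker;

static void *run(void *arg) {
  Worker *w = arg;
  for (int i = 0; i < w->num; ++i) w->done++;   /* stand-in for opening one vnode */
  return NULL;
}

int main(void) {
  int vnodes[] = {11, 12, 13, 14, 15};
  int numOfVnodes = 5, threadNum = 2;
  Worker workers[2] = {{{0}, 0, 0}, {{0}, 0, 0}};
  pthread_t tid[2];

  for (int v = 0; v < numOfVnodes; ++v) {            /* vnode v goes to thread v % threadNum */
    Worker *w = &workers[v % threadNum];
    w->items[w->num++] = vnodes[v];
  }
  for (int t = 0; t < threadNum; ++t) pthread_create(&tid[t], NULL, run, &workers[t]);

  int opened = 0;
  for (int t = 0; t < threadNum; ++t) {              /* join every worker, then aggregate */
    pthread_join(tid[t], NULL);
    opened += workers[t].done;
  }
  printf("opened %d of %d\n", opened, numOfVnodes);
  return 0;
}
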
@@ -36,6 +36,7 @@ typedef struct {
int32_t min; // min number of workers
int32_t num; // current number of workers
SReadWorker *readWorker;
pthread_mutex_t mutex;
} SReadWorkerPool;

static void *dnodeProcessReadQueue(void *param);

@@ -51,27 +52,28 @@ int32_t dnodeInitVnodeRead() {
readPool.min = 2;
readPool.max = tsNumOfCores * tsNumOfThreadsPerCore;
if (readPool.max <= readPool.min * 2) readPool.max = 2 * readPool.min;
readPool.readWorker = (SReadWorker *) calloc(sizeof(SReadWorker), readPool.max);
readPool.readWorker = (SReadWorker *)calloc(sizeof(SReadWorker), readPool.max);
pthread_mutex_init(&readPool.mutex, NULL);

if (readPool.readWorker == NULL) return -1;
for (int i=0; i < readPool.max; ++i) {
for (int i = 0; i < readPool.max; ++i) {
SReadWorker *pWorker = readPool.readWorker + i;
pWorker->workerId = i;
}

dInfo("dnode read is opened");
dInfo("dnode read is opened, min worker:%d max worker:%d", readPool.min, readPool.max);
return 0;
}

void dnodeCleanupVnodeRead() {
for (int i=0; i < readPool.max; ++i) {
for (int i = 0; i < readPool.max; ++i) {
SReadWorker *pWorker = readPool.readWorker + i;
if (pWorker->thread) {
taosQsetThreadResume(readQset);
}
}

for (int i=0; i < readPool.max; ++i) {
for (int i = 0; i < readPool.max; ++i) {
SReadWorker *pWorker = readPool.readWorker + i;
if (pWorker->thread) {
pthread_join(pWorker->thread, NULL);

@@ -80,6 +82,7 @@ void dnodeCleanupVnodeRead() {

free(readPool.readWorker);
taosCloseQset(readQset);
pthread_mutex_destroy(&readPool.mutex);

dInfo("dnode read is closed");
}

@@ -136,8 +139,12 @@ void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) {
}

void *dnodeAllocateVnodeRqueue(void *pVnode) {
pthread_mutex_lock(&readPool.mutex);
taos_queue queue = taosOpenQueue();
if (queue == NULL) return NULL;
if (queue == NULL) {
pthread_mutex_unlock(&readPool.mutex);
return NULL;
}

taosAddIntoQset(readQset, queue, pVnode);

@@ -160,6 +167,7 @@ void *dnodeAllocateVnodeRqueue(void *pVnode) {
} while (readPool.num < readPool.min);
}

pthread_mutex_unlock(&readPool.mutex);
dDebug("pVnode:%p, read queue:%p is allocated", pVnode, queue);

return queue;

@@ -47,6 +47,7 @@ typedef struct {
int32_t max; // max number of workers
int32_t nextId; // from 0 to max-1, cyclic
SWriteWorker *writeWorker;
pthread_mutex_t mutex;
} SWriteWorkerPool;

static void *dnodeProcessWriteQueue(void *param);

@@ -58,25 +59,26 @@ int32_t dnodeInitVnodeWrite() {
wWorkerPool.max = tsNumOfCores;
wWorkerPool.writeWorker = (SWriteWorker *)calloc(sizeof(SWriteWorker), wWorkerPool.max);
if (wWorkerPool.writeWorker == NULL) return -1;
pthread_mutex_init(&wWorkerPool.mutex, NULL);

for (int32_t i = 0; i < wWorkerPool.max; ++i) {
wWorkerPool.writeWorker[i].workerId = i;
}

dInfo("dnode write is opened");
dInfo("dnode write is opened, max worker %d", wWorkerPool.max);
return 0;
}

void dnodeCleanupVnodeWrite() {
for (int32_t i = 0; i < wWorkerPool.max; ++i) {
SWriteWorker *pWorker = wWorkerPool.writeWorker + i;
SWriteWorker *pWorker = wWorkerPool.writeWorker + i;
if (pWorker->thread) {
taosQsetThreadResume(pWorker->qset);
}
}

for (int32_t i = 0; i < wWorkerPool.max; ++i) {
SWriteWorker *pWorker = wWorkerPool.writeWorker + i;
SWriteWorker *pWorker = wWorkerPool.writeWorker + i;
if (pWorker->thread) {
pthread_join(pWorker->thread, NULL);
taosFreeQall(pWorker->qall);

@@ -84,6 +86,7 @@ void dnodeCleanupVnodeWrite() {
}
}

pthread_mutex_destroy(&wWorkerPool.mutex);
free(wWorkerPool.writeWorker);
dInfo("dnode write is closed");
}

@@ -124,14 +127,19 @@ void dnodeDispatchToVnodeWriteQueue(SRpcMsg *pMsg) {
}

void *dnodeAllocateVnodeWqueue(void *pVnode) {
pthread_mutex_lock(&wWorkerPool.mutex);
SWriteWorker *pWorker = wWorkerPool.writeWorker + wWorkerPool.nextId;
void *queue = taosOpenQueue();
if (queue == NULL) return NULL;
if (queue == NULL) {
pthread_mutex_unlock(&wWorkerPool.mutex);
return NULL;
}

if (pWorker->qset == NULL) {
pWorker->qset = taosOpenQset();
if (pWorker->qset == NULL) {
taosCloseQueue(queue);
pthread_mutex_unlock(&wWorkerPool.mutex);
return NULL;
}

@@ -140,6 +148,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode) {
if (pWorker->qall == NULL) {
taosCloseQset(pWorker->qset);
taosCloseQueue(queue);
pthread_mutex_unlock(&wWorkerPool.mutex);
return NULL;
}
pthread_attr_t thAttr;

@@ -163,6 +172,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode) {
wWorkerPool.nextId = (wWorkerPool.nextId + 1) % wWorkerPool.max;
}

pthread_mutex_unlock(&wWorkerPool.mutex);
dDebug("pVnode:%p, write queue:%p is allocated", pVnode, queue);

return queue;

@@ -201,6 +211,8 @@ static void *dnodeProcessWriteQueue(void *param) {
int type;
void *pVnode, *item;

dDebug("write worker:%d is running", pWorker->workerId);

while (1) {
numOfMsgs = taosReadAllQitemsFromQset(pWorker->qset, pWorker->qall, &pVnode);
if (numOfMsgs == 0) {

@@ -153,6 +153,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_ALREAY_EXIST, 0, 0x0369, "mnode tag
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_NOT_EXIST, 0, 0x036A, "mnode tag not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, 0, 0x036B, "mnode field already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, 0, 0x036C, "mnode field not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, 0, 0x036D, "mnode invalid stable name")

TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, 0, 0x0380, "mnode db not selected")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, 0, 0x0381, "mnode database aleady exist")

@@ -192,7 +192,8 @@ typedef struct SSubmitBlk {
int32_t tid; // table id
int32_t padding; // TODO just for padding here
int32_t sversion; // data schema version
int32_t len; // data part length, not including the SSubmitBlk head
int32_t dataLen; // data part length, not including the SSubmitBlk head
int32_t schemaLen; // schema length, if length is 0, no schema exists
int16_t numOfRows; // total number of rows in current submit block
char data[];
} SSubmitBlk;

@@ -618,7 +619,7 @@ typedef struct {
} SMDVnodeDesc;

typedef struct {
char db[TSDB_DB_NAME_LEN];
char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
SMDVnodeCfg cfg;
SMDVnodeDesc nodes[TSDB_MAX_REPLICA];
} SMDCreateVnodeMsg;

@@ -24,6 +24,7 @@
#include "tarray.h"
#include "tdataformat.h"
#include "tname.h"
#include "hash.h"

#ifdef __cplusplus
extern "C" {

@@ -168,8 +169,9 @@ typedef struct SDataBlockInfo {
} SDataBlockInfo;

typedef struct {
size_t numOfTables;
SArray *pGroupList;
size_t numOfTables;
SArray *pGroupList;
SHashObj *map; // speedup acquire the tableQueryInfo from STableId
} STableGroupInfo;

typedef struct SQueryRowCond {

@@ -790,7 +790,7 @@ int isCommentLine(char *line) {
void source_file(TAOS *con, char *fptr) {
wordexp_t full_path;
int read_len = 0;
char * cmd = calloc(1, MAX_COMMAND_SIZE);
char * cmd = calloc(1, tsMaxSQLStringLen+1);
size_t cmd_len = 0;
char * line = NULL;
size_t line_len = 0;

@@ -822,7 +822,7 @@ void source_file(TAOS *con, char *fptr) {
}

while ((read_len = getline(&line, &line_len, f)) != -1) {
if (read_len >= MAX_COMMAND_SIZE) continue;
if (read_len >= tsMaxSQLStringLen) continue;
line[--read_len] = '\0';

if (read_len == 0 || isCommentLine(line)) { // line starts with #

@@ -839,7 +839,7 @@ void source_file(TAOS *con, char *fptr) {
memcpy(cmd + cmd_len, line, read_len);
printf("%s%s\n", PROMPT_HEADER, cmd);
shellRunCommand(con, cmd);
memset(cmd, 0, MAX_COMMAND_SIZE);
memset(cmd, 0, tsMaxSQLStringLen);
cmd_len = 0;
}

@@ -132,7 +132,7 @@ typedef struct SVgObj {
int64_t createdTime;
int32_t lbDnodeId;
int32_t lbTime;
char dbName[TSDB_DB_NAME_LEN];
char dbName[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
int8_t inUse;
int8_t accessState;
int8_t reserved0[5];

@@ -287,7 +287,7 @@ void sdbUpdateSync() {
SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId);
if (pDnode != NULL) {
syncCfg.nodeInfo[index].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC;
strcpy(syncCfg.nodeInfo[index].nodeFqdn, pDnode->dnodeEp);
tstrncpy(syncCfg.nodeInfo[index].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
index++;
}

@@ -367,6 +367,7 @@ void sdbCleanUp() {
tsSdbObj.status = SDB_STATUS_CLOSING;

sdbCleanupWriteWorker();
sdbDebug("sdb will be closed, version:%" PRId64, tsSdbObj.version);

if (tsSdbObj.sync) {
syncStop(tsSdbObj.sync);

@@ -976,11 +977,11 @@ static void *sdbWorkerFp(void *param) {
tstrerror(pOper->retCode));
}

dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode);

if (pOper != NULL) {
sdbDecRef(pOper->table, pOper->pObj);
}

dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode);
}
taosFreeQitem(item);
}

@@ -281,6 +281,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {

static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
SCMConnectMsg *pConnectMsg = pMsg->rpcMsg.pCont;
SCMConnectRsp *pConnectRsp = NULL;
int32_t code = TSDB_CODE_SUCCESS;

SRpcConnInfo connInfo;

@@ -309,7 +310,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
mnodeDecDbRef(pDb);
}

SCMConnectRsp *pConnectRsp = rpcMallocCont(sizeof(SCMConnectRsp));
pConnectRsp = rpcMallocCont(sizeof(SCMConnectRsp));
if (pConnectRsp == NULL) {
code = TSDB_CODE_MND_OUT_OF_MEMORY;
goto connect_over;

@@ -332,7 +333,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {

connect_over:
if (code != TSDB_CODE_SUCCESS) {
rpcFreeCont(pConnectRsp);
if (pConnectRsp) rpcFreeCont(pConnectRsp);
mLError("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code));
} else {
mLInfo("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code));

|
|
@ -382,11 +382,13 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt
pStable->numOfTables++;
if (pStable->vgHash == NULL) {
pStable->vgHash = taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
}
if (pStable->vgHash != NULL) {
taosHashPut(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) {
taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
}
}
}
@ -457,10 +459,9 @@ static int32_t mnodeSuperTableActionUpdate(SSdbOper *pOper) {
free(pNew);
free(oldTableId);
free(oldSchema);
mnodeDecTableRef(pTable);
}
mnodeDecTableRef(pTable);
return TSDB_CODE_SUCCESS;
}
@ -1254,13 +1255,13 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
char * pWrite;
int32_t cols = 0;
SSuperTableObj *pTable = NULL;
char prefix[20] = {0};
char prefix[64] = {0};
int32_t prefixLen;
SDbObj *pDb = mnodeGetDb(pShow->db);
if (pDb == NULL) return 0;
strcpy(prefix, pDb->name);
tstrncpy(prefix, pDb->name, 64);
strcat(prefix, TS_PATH_DELIMITER);
prefixLen = strlen(prefix);
@ -1558,10 +1559,10 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO
static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
if (pTable != NULL) {
mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
}
assert(pTable);
mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
if (code != TSDB_CODE_SUCCESS) return code;
@ -1965,9 +1966,15 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
SCMTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
STagData *pTag = (STagData *)pInfo->tags;
STagData *pTags = (STagData *)pInfo->tags;
int32_t tagLen = htonl(pTags->dataLen);
if (pTags->name[0] == 0) {
mError("app:%p:%p, table:%s, failed to create table on demand for stable is empty, tagLen:%d", pMsg->rpcMsg.ahandle,
pMsg, pInfo->tableId, tagLen);
return TSDB_CODE_MND_INVALID_STABLE_NAME;
}
int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + htonl(pTag->dataLen);
int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + tagLen;
SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen);
if (pCreateMsg == NULL) {
mError("app:%p:%p, table:%s, failed to create table while get meta info, no enough memory", pMsg->rpcMsg.ahandle,
@ -1982,9 +1989,9 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
pCreateMsg->getMeta = 1;
pCreateMsg->contLen = htonl(contLen);
memcpy(pCreateMsg->schema, pInfo->tags, contLen - sizeof(SCMCreateTableMsg));
mDebug("app:%p:%p, table:%s, start to create on demand, stable:%s", pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId,
((STagData *)(pCreateMsg->schema))->name);
memcpy(pCreateMsg->schema, pTags, contLen - sizeof(SCMCreateTableMsg));
mDebug("app:%p:%p, table:%s, start to create on demand, tagLen:%d stable:%s",
pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId, tagLen, pTags->name);
rpcFreeCont(pMsg->rpcMsg.pCont);
pMsg->rpcMsg.msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE;
@ -2370,10 +2377,21 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
char prefix[64] = {0};
strcpy(prefix, pDb->name);
tstrncpy(prefix, pDb->name, 64);
strcat(prefix, TS_PATH_DELIMITER);
int32_t prefixLen = strlen(prefix);
char* pattern = NULL;
if (pShow->payloadLen > 0) {
pattern = (char*)malloc(pShow->payloadLen + 1);
if (pattern == NULL) {
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
return 0;
}
memcpy(pattern, pShow->payload, pShow->payloadLen);
pattern[pShow->payloadLen] = 0;
}
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextChildTable(pShow->pIter, &pTable);
if (pTable == NULL) break;
@ -2389,7 +2407,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
// pattern compare for table name
mnodeExtractTableName(pTable->info.tableId, tableName);
if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) {
if (pattern != NULL && patternMatch(pattern, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) {
mnodeDecTableRef(pTable);
continue;
}
@ -2433,6 +2451,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
mnodeVacuumResult(data, NUM_OF_COLUMNS, numOfRows, rows, pShow);
mnodeDecDbRef(pDb);
free(pattern);
return numOfRows;
}
@ -2560,7 +2579,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
char prefix[64] = {0};
strcpy(prefix, pDb->name);
tstrncpy(prefix, pDb->name, 64);
strcat(prefix, TS_PATH_DELIMITER);
int32_t prefixLen = strlen(prefix);
@ -358,7 +358,7 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj));
tstrncpy(pVgroup->dbName, pDb->name, TSDB_DB_NAME_LEN);
tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN);
pVgroup->numOfVnodes = pDb->cfg.replications;
pVgroup->createdTime = taosGetTimestampMs();
pVgroup->accessState = TSDB_VN_ALL_ACCCESS;
@ -326,12 +326,12 @@ bool taosGetDisk() {
if (statvfs("/tmp", &info)) {
//tsTotalTmpDirGB = 0;
//tsAvailTmpDirGB = 0;
//tsAvailTmpDirectorySpace = 0;
uError("failed to get disk size, tmpDir:/tmp errno:%s", strerror(errno));
return false;
} else {
tsTotalTmpDirGB = (float)((double)info.f_blocks * (double)info.f_frsize / unit);
tsAvailTmpDirGB = (float)((double)info.f_bavail * (double)info.f_frsize / unit);
tsAvailTmpDirectorySpace = (float)((double)info.f_bavail * (double)info.f_frsize / unit);
}
return true;
@ -359,6 +359,8 @@ void httpExecCmd(HttpContext *pContext) {
void httpProcessRequestCb(void *param, TAOS_RES *result, int code) {
HttpContext *pContext = param;
taos_free_result(result);
if (pContext == NULL) return;
if (code < 0) {
@ -161,12 +161,12 @@ typedef struct SQuery {
} SQuery;
typedef struct SQueryRuntimeEnv {
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery;
SQLFunctionCtx* pCtx;
int16_t numOfRowsPerPage;
int16_t offset[TSDB_MAX_COLUMNS];
uint16_t scanFlag; // denotes reversed scan of data or not
uint16_t scanFlag; // denotes reversed scan of data or not
SFillInfo* pFillInfo;
SWindowResInfo windowResInfo;
STSBuf* pTSBuf;
@ -176,7 +176,8 @@ typedef struct SQueryRuntimeEnv {
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
bool topBotQuery; // false;
bool topBotQuery; // false
int32_t prevGroupId; // previous executed group id
} SQueryRuntimeEnv;
typedef struct SQInfo {
@ -12,12 +12,10 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <taosmsg.h>
#include "os.h"
#include "qfill.h"
#include "hash.h"
#include "hashfunc.h"
#include "qExecutor.h"
#include "qUtil.h"
#include "qast.h"
@ -25,7 +23,6 @@
#include "query.h"
#include "queryLog.h"
#include "taosmsg.h"
#include "tdataformat.h"
#include "tlosertree.h"
#include "tscUtil.h" // todo move the function to common module
#include "tscompression.h"
@ -91,6 +88,9 @@ typedef struct {
} SQueryStatusInfo;
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
static void setQueryStatus(SQuery *pQuery, int8_t status);
static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
@ -1708,7 +1708,23 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD
static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
static void changeExecuteScanOrder(SQuery *pQuery, bool stableQuery) {
// todo refactor, add iterator
static void doExchangeTimeWindow(SQInfo* pQInfo) {
size_t t = GET_NUM_OF_TABLEGROUP(pQInfo);
for(int32_t i = 0; i < t; ++i) {
SArray* p1 = GET_TABLEGROUP(pQInfo, i);
size_t len = taosArrayGetSize(p1);
for(int32_t j = 0; j < len; ++j) {
STableQueryInfo* pTableQueryInfo = (STableQueryInfo*) taosArrayGetP(p1, j);
SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY);
}
}
}
static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
// in case of point-interpolation query, use asc order scan
char msg[] = "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64
"-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64;
@ -1748,6 +1764,7 @@ static void changeExecuteScanOrder(SQuery *pQuery, bool stableQuery) {
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
doExchangeTimeWindow(pQInfo);
}
pQuery->order.order = TSDB_ORDER_ASC;
@ -1757,6 +1774,7 @@ static void changeExecuteScanOrder(SQuery *pQuery, bool stableQuery) {
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
doExchangeTimeWindow(pQInfo);
}
pQuery->order.order = TSDB_ORDER_DESC;
@ -2219,7 +2237,7 @@ static void doSetTagValueInParam(void *tsdb, void* pTable, int32_t tagColId, tVa
}
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (isNull(varDataVal(val), type)) {
if (isNull(val, type)) {
tag->nType = TSDB_DATA_TYPE_NULL;
return;
}
@ -2489,10 +2507,10 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) {
int64_t st = taosGetTimestampMs();
int32_t ret = TSDB_CODE_SUCCESS;
int32_t numOfGroups = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
while (pQInfo->groupIndex < numOfGroups) {
SArray *group = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, pQInfo->groupIndex);
SArray *group = GET_TABLEGROUP(pQInfo, pQInfo->groupIndex);
ret = mergeIntoGroupResultImpl(pQInfo, group);
if (ret < 0) { // not enough disk space to save the data into disk
return -1;
@ -2525,7 +2543,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
}
// check if all results has been sent to client
int32_t numOfGroup = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
int32_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo);
if (pQInfo->numOfGroupResultPages == 0 && pQInfo->groupIndex == numOfGroup) {
pQInfo->tableIndex = pQInfo->tableqinfoGroupInfo.numOfTables; // set query completed
return;
@ -2859,10 +2877,10 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
}
}
int32_t numOfGroups = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
for(int32_t i = 0; i < numOfGroups; ++i) {
SArray *group = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, i);
SArray *group = GET_TABLEGROUP(pQInfo, i);
size_t t = taosArrayGetSize(group);
for (int32_t j = 0; j < t; ++j) {
@ -3252,6 +3270,8 @@ static bool hasMainOutput(SQuery *pQuery) {
}
static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win) {
SQuery* pQuery = pRuntimeEnv->pQuery;
STableQueryInfo *pTableQueryInfo = calloc(1, sizeof(STableQueryInfo));
pTableQueryInfo->win = win;
@ -3260,7 +3280,15 @@ static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, voi
pTableQueryInfo->pTable = pTable;
pTableQueryInfo->cur.vgroupIndex = -1;
initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, 100, 100, TSDB_DATA_TYPE_INT);
int32_t initialSize = 1;
int32_t initialThreshold = 1;
if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
initialSize = 20;
initialThreshold = 100;
}
initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT);
return pTableQueryInfo;
}
@ -3273,26 +3301,34 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
free(pTableQueryInfo);
}
void setCurrentQueryTable(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery;
pQuery->current = pTableQueryInfo;
assert(((pTableQueryInfo->lastKey >= pTableQueryInfo->win.skey) && QUERY_IS_ASC_QUERY(pQuery)) ||
((pTableQueryInfo->lastKey <= pTableQueryInfo->win.skey) && !QUERY_IS_ASC_QUERY(pQuery)));
}
#define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \
do { \
SQuery *_query = (_runtime)->pQuery; \
_query->current = _tableInfo; \
assert((((_tableInfo)->lastKey >= (_tableInfo)->win.skey) && QUERY_IS_ASC_QUERY(_query)) || \
(((_tableInfo)->lastKey <= (_tableInfo)->win.skey) && !QUERY_IS_ASC_QUERY(_query))); \
} while (0)
/**
* set output buffer for different group
* TODO opt performance if current group is identical to previous group
* @param pRuntimeEnv
* @param pDataBlockInfo
*/
void setExecutionContext(SQInfo *pQInfo, void* pTable, int32_t groupIndex, TSKEY nextKey) {
void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
STableQueryInfo *pTableQueryInfo = pRuntimeEnv->pQuery->current;
SWindowResInfo * pWindowResInfo = &pRuntimeEnv->windowResInfo;
int32_t GROUPRESULTID = 1;
STableQueryInfo *pTableQueryInfo = pRuntimeEnv->pQuery->current;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
// lastKey needs to be updated
pTableQueryInfo->lastKey = nextKey;
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) {
return;
}
int32_t GROUPRESULTID = 1;
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, sizeof(groupIndex));
if (pWindowRes == NULL) {
return;
@ -3309,11 +3345,10 @@ void setExecutionContext(SQInfo *pQInfo, void* pTable, int32_t groupIndex, TSKEY
}
}
// record the current active group id
pRuntimeEnv->prevGroupId = groupIndex;
setWindowResOutputBuf(pRuntimeEnv, pWindowRes);
initCtxOutputBuf(pRuntimeEnv);
pTableQueryInfo->lastKey = nextKey;
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
@ -3349,7 +3384,7 @@ void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->resultInfo = &pResult->resultInfo[i];
if (pCtx->resultInfo->complete) {
if (pCtx->resultInfo->initialized && pCtx->resultInfo->complete) {
continue;
}
@ -3479,7 +3514,7 @@ static int32_t getNumOfSubset(SQInfo *pQInfo) {
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (isIntervalQuery(pQuery))) {
totalSubset = numOfClosedTimeWindow(&pQInfo->runtimeEnv.windowResInfo);
} else {
totalSubset = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
totalSubset = GET_NUM_OF_TABLEGROUP(pQInfo);
}
return totalSubset;
@ -3619,36 +3654,40 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *
bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
SFillInfo *pFillInfo = pRuntimeEnv->pFillInfo;
// todo refactor
if (pQuery->fillType == TSDB_FILL_NONE || (pQuery->fillType != TSDB_FILL_NONE && isPointInterpoQuery(pQuery))) {
assert(pFillInfo == NULL);
if (pQuery->limit.limit > 0 && pQuery->rec.total >= pQuery->limit.limit) {
return false;
}
if (pQuery->limit.limit > 0 && pQuery->rec.rows >= pQuery->limit.limit) {
return false;
}
if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) {
// There are results not returned to client yet, so filling operation applied to the remain result is required
// in the first place.
int32_t remain = taosNumOfRemainRows(pFillInfo);
if (remain > 0) {
return true;
}
// There are results not returned to client, fill operation applied to the remain result set in the
// first place is required.
int32_t remain = taosNumOfRemainRows(pFillInfo);
if (remain > 0) {
return true;
}
/*
* While the code reaches here, there are no results returned to client now.
* If query is not completed yet, the gaps between two results blocks need to be handled after next data block
* is retrieved from TSDB.
*
* NOTE: If the result set is not the first block, the gap in front of the result set will be filled. If the result
* set is the FIRST result block, the gap between the start time of query time window and the timestamp of the
* first result row in the actual result set will fill nothing.
*/
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
int32_t numOfTotal = getFilledNumOfRes(pFillInfo, pQuery->window.ekey, pQuery->rec.capacity);
return numOfTotal > 0;
/*
* While the code reaches here, there are no results remains now.
* If query is not completed yet, the gaps between two results blocks need to be handled after next data block
* is retrieved from TSDB.
*
* NOTE: If the result set is not the first block, the gap in front of the result set will be filled. If the result
* set is the FIRST result block, the gap between the start time of query time window and the timestamp of the
* first result row in the actual result set will fill nothing.
*/
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
int32_t numOfTotal = getFilledNumOfRes(pFillInfo, pQuery->window.ekey, pQuery->rec.capacity);
return numOfTotal > 0;
}
} else {
// there are results waiting for returned to client.
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) &&
(isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) &&
(pRuntimeEnv->windowResInfo.size > 0)) {
return true;
}
}
return false;
@ -3690,7 +3729,7 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
}
}
int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t *numOfInterpo) {
int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t *numOfFilled) {
SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
SQuery *pQuery = pRuntimeEnv->pQuery;
SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo;
@ -3995,7 +4034,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
&& (!isGroupbyNormalCol(pQuery->pGroupbyExpr))
&& (!isFixedOutputQuery(pQuery))
) {
SArray* pa = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, 0);
SArray* pa = GET_TABLEGROUP(pQInfo, 0);
STableQueryInfo* pCheckInfo = taosArrayGetP(pa, 0);
cond.twindow = pCheckInfo->win;
}
@ -4039,7 +4078,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
pQuery->precision = tsdbGetCfg(tsdb)->precision;
setScanLimitationByResultBuffer(pQuery);
changeExecuteScanOrder(pQuery, false);
changeExecuteScanOrder(pQInfo, false);
setupQueryHandle(tsdb, pQInfo, isSTableQuery);
pQInfo->tsdb = tsdb;
@ -4049,6 +4088,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
pRuntimeEnv->pTSBuf = param;
pRuntimeEnv->cur.vgroupIndex = -1;
pRuntimeEnv->stableQuery = isSTableQuery;
pRuntimeEnv->prevGroupId = INT32_MIN;
if (param != NULL) {
int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
@ -4139,33 +4179,13 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
}
SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle);
STableQueryInfo *pTableQueryInfo = NULL;
// todo opt performance using hash table
size_t numOfGroup = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
for (int32_t i = 0; i < numOfGroup; ++i) {
SArray *group = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, i);
size_t num = taosArrayGetSize(group);
for (int32_t j = 0; j < num; ++j) {
STableQueryInfo *p = taosArrayGetP(group, j);
STableId id = tsdbGetTableId(p->pTable);
if (id.tid == blockInfo.tid) {
assert(id.uid == blockInfo.uid);
pTableQueryInfo = p;
break;
}
}
if (pTableQueryInfo != NULL) {
break;
}
STableQueryInfo **pTableQueryInfo = (STableQueryInfo**) taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid));
if(pTableQueryInfo == NULL) {
break;
}
assert(pTableQueryInfo != NULL);
setCurrentQueryTable(pRuntimeEnv, pTableQueryInfo);
assert(*pTableQueryInfo != NULL);
SET_CURRENT_QUERY_TABLE_INFO(pRuntimeEnv, *pTableQueryInfo);
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
@ -4173,11 +4193,11 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
if (!isIntervalQuery(pQuery)) {
int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
setExecutionContext(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo->groupIndex, blockInfo.window.ekey + step);
setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step);
} else { // interval query
TSKEY nextKey = blockInfo.window.skey;
setIntervalQueryRange(pQInfo, nextKey);
/*int32_t ret = */setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
/*int32_t ret = */setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo);
}
}
@ -4197,7 +4217,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
SQuery * pQuery = pRuntimeEnv->pQuery;
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
SArray *group = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, 0);
SArray *group = GET_TABLEGROUP(pQInfo, 0);
STableQueryInfo* pCheckInfo = taosArrayGetP(group, index);
setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb);
@ -4261,7 +4281,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
SQuery * pQuery = pRuntimeEnv->pQuery;
setQueryStatus(pQuery, QUERY_COMPLETED);
size_t numOfGroups = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
if (isPointInterpoQuery(pQuery) || isFirstLastRowQuery(pQuery)) {
resetCtxOutputBuf(pRuntimeEnv);
@ -4311,7 +4331,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
taosArrayDestroy(s);
// here we simply set the first table as current table
pQuery->current = (STableQueryInfo*) taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, 0);
pQuery->current = (STableQueryInfo*) GET_TABLEGROUP(pQInfo, 0);
scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
int64_t numOfRes = getNumOfResult(pRuntimeEnv);
@ -4424,7 +4444,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
resetCtxOutputBuf(pRuntimeEnv);
resetTimeWindowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo);
SArray *group = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, 0);
SArray *group = GET_TABLEGROUP(pQInfo, 0);
assert(taosArrayGetSize(group) == pQInfo->tableqinfoGroupInfo.numOfTables &&
1 == taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList));
@ -4549,7 +4569,8 @@ static void doSaveContext(SQInfo *pQInfo) {
if (pRuntimeEnv->pSecQueryHandle != NULL) {
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
}
pRuntimeEnv->prevGroupId = INT32_MIN;
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
@ -4575,9 +4596,9 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
if (isIntervalQuery(pQuery)) {
size_t numOfGroup = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroup; ++i) {
SArray *group = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, i);
SArray *group = GET_TABLEGROUP(pQInfo, i);
size_t num = taosArrayGetSize(group);
for (int32_t j = 0; j < num; ++j) {
@ -4794,7 +4815,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery;
pQuery->current = pTableInfo;
int32_t numOfInterpo = 0;
int32_t numOfFilled = 0;
TSKEY newStartKey = TSKEY_INITIAL_VAL;
// skip blocks without load the actual data block from file if no filter condition present
@ -4822,9 +4843,9 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
} else {
taosFillSetStartInfo(pRuntimeEnv->pFillInfo, pQuery->rec.rows, pQuery->window.ekey);
taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (tFilePage**) pQuery->sdata);
numOfInterpo = 0;
numOfFilled = 0;
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfInterpo);
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled);
if (pQuery->rec.rows > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
limitResults(pRuntimeEnv);
break;
@ -4843,7 +4864,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex);
}
pQInfo->pointsInterpo += numOfInterpo;
pQInfo->pointsInterpo += numOfFilled;
}
static void tableQueryImpl(SQInfo *pQInfo) {
@ -4851,45 +4872,41 @@ static void tableQueryImpl(SQInfo *pQInfo) {
SQuery * pQuery = pRuntimeEnv->pQuery;
if (queryHasRemainResults(pRuntimeEnv)) {
/*
* There are remain results that are not returned due to result interpolation
* So, we do keep in this procedure instead of launching retrieve procedure for next results.
*/
int32_t numOfInterpo = 0;
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfInterpo);
if (pQuery->rec.rows > 0) {
limitResults(pRuntimeEnv);
}
qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total);
return;
}
// here we have scan all qualified data in both data file and cache
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
// continue to get push data from the group result
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) ||
((isIntervalQuery(pQuery) && pQuery->rec.total < pQuery->limit.limit))) {
// todo limit the output for interval query?
if (pQuery->fillType != TSDB_FILL_NONE) {
/*
* There are remain results that are not returned due to result interpolation
* So, we do keep in this procedure instead of launching retrieve procedure for next results.
*/
int32_t numOfFilled = 0;
pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled);
if (pQuery->rec.rows > 0) {
limitResults(pRuntimeEnv);
}
qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total);
return;
} else {
pQuery->rec.rows = 0;
pQInfo->groupIndex = 0; // always start from 0
if (pRuntimeEnv->windowResInfo.size > 0) {
copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult);
pQuery->rec.rows += pQuery->rec.rows;
clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex);
if (pQuery->rec.rows > 0) {
qDebug("QInfo:%p %"PRId64" rows returned from group results, total:%"PRId64"", pQInfo, pQuery->rec.rows, pQuery->rec.total);
// there are not data remains
if (pRuntimeEnv->windowResInfo.size <= 0) {
qDebug("QInfo:%p query over, %"PRId64" rows are returned", pQInfo, pQuery->rec.total);
}
return;
}
}
}
qDebug("QInfo:%p query over, %"PRId64" rows are returned", pQInfo, pQuery->rec.total);
return;
}
// number of points returned during this query
@ -4897,7 +4914,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
int64_t st = taosGetTimestampUs();
assert(pQInfo->tableqinfoGroupInfo.numOfTables == 1);
SArray* g = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, 0);
SArray* g = GET_TABLEGROUP(pQInfo, 0);
STableQueryInfo* item = taosArrayGetP(g, 0);
// group by normal column, sliding window query, interval query are handled by interval query processor
@ -5609,7 +5626,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQInfo->tableqinfoGroupInfo.pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES);
pQInfo->tableqinfoGroupInfo.numOfTables = pTableGroupInfo->numOfTables;
}
pQInfo->tableqinfoGroupInfo.map = taosHashInit(pTableGroupInfo->numOfTables,
taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
}
int tableIndex = 0;
STimeWindow window = pQueryMsg->window;
@ -5637,6 +5656,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
item->groupIndex = i;
item->tableIndex = tableIndex++;
taosArrayPush(p1, &item);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
}
taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);
@ -5658,26 +5678,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
return pQInfo;
_cleanup:
//tfree(pQuery->fillVal);
//if (pQuery->sdata != NULL) {
// for (int16_t col = 0; col < pQuery->numOfOutput; ++col) {
// tfree(pQuery->sdata[col]);
// }
//}
//
//tfree(pQuery->sdata);
//tfree(pQuery->pFilterInfo);
//tfree(pQuery->colList);
//tfree(pExprs);
//tfree(pGroupbyExpr);
//taosArrayDestroy(pQInfo->arrTableIdInfo);
//tsdbDestoryTableGroup(&pQInfo->tableGroupInfo);
//
//tfree(pQInfo);
freeQInfo(pQInfo);
return NULL;
@ -5712,7 +5712,6 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
UNUSED(ret);
}
// only the successful complete requries the sem_post/over = 1 operations.
if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.skey > pQuery->window.ekey)) ||
(!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.ekey > pQuery->window.skey))) {
qDebug("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->window.skey,
@ -5722,7 +5721,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
sem_post(&pQInfo->dataReady);
return TSDB_CODE_SUCCESS;
}
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
setQueryStatus(pQuery, QUERY_COMPLETED);
@ -5784,9 +5783,9 @@ static void freeQInfo(SQInfo *pQInfo) {
}
// todo refactor, extract method to destroytableDataInfo
int32_t numOfGroups = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroups; ++i) {
SArray *p = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, i);
SArray *p = GET_TABLEGROUP(pQInfo, i);;
size_t num = taosArrayGetSize(p);
for(int32_t j = 0; j < num; ++j) {
@ -5800,7 +5799,7 @@ static void freeQInfo(SQInfo *pQInfo) {
}
taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList);
taosHashCleanup(pQInfo->tableqinfoGroupInfo.map);
tsdbDestoryTableGroup(&pQInfo->tableGroupInfo);
taosArrayDestroy(pQInfo->arrTableIdInfo);
@ -6027,7 +6026,7 @@ void qDestroyQueryInfo(qinfo_t qHandle, void (*fp)(void*), void* param) {
return;
}
int16_t ref = T_REF_DEC(pQInfo);
int32_t ref = T_REF_DEC(pQInfo);
qDebug("QInfo:%p dec refCount, value:%d", pQInfo, ref);
if (ref == 0) {
@ -6053,6 +6052,11 @@ void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
return;
}
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table exists for query, abort", pQInfo);
return;
}
qDebug("QInfo:%p query task is launched", pQInfo);
if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) {
@ -6175,14 +6179,14 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
size_t numOfGroup = taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList);
size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo);
assert(numOfGroup == 0 || numOfGroup == 1);
if (numOfGroup == 0) {
return;
}
SArray* pa = taosArrayGetP(pQInfo->tableqinfoGroupInfo.pGroupList, 0);
SArray* pa = GET_TABLEGROUP(pQInfo, 0);
size_t num = taosArrayGetSize(pa);
assert(num == pQInfo->tableqinfoGroupInfo.numOfTables);
@ -133,7 +133,6 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
}
pWindowResInfo->size = remain;
printf("---------------size:%ld\n", taosHashGetSize(pWindowResInfo->hashList));
for (int32_t k = 0; k < pWindowResInfo->size; ++k) {
SWindowResult *pResult = &pWindowResInfo->pResult[k];
int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->window.skey,
@ -209,8 +209,7 @@ int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) {
return 0;
}
return FILL_IS_ASC_FILL(pFillInfo) ? (pFillInfo->numOfRows - pFillInfo->rowIdx)
: pFillInfo->rowIdx + 1;
return FILL_IS_ASC_FILL(pFillInfo) ? (pFillInfo->numOfRows - pFillInfo->rowIdx) : pFillInfo->rowIdx + 1;
}
// todo: refactor
@ -631,5 +631,5 @@ void exprSerializeTest2() {
}
} // namespace
TEST(testCase, astTest) {
exprSerializeTest2();
// exprSerializeTest2();
}
@ -665,6 +665,12 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
return pConn;
}
// if code is not 0, it means it is simple reqhead, just ignore
if (pHead->code != 0) {
terrno = TSDB_CODE_RPC_ALREADY_PROCESSED;
return NULL;
}
int sid = taosAllocateId(pRpc->idPool);
if (sid <= 0) {
tError("%s maximum number of sessions:%d is reached", pRpc->label, pRpc->sessions);
@ -1028,15 +1034,20 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
rpcMsg.ahandle = pConn->ahandle;
if ( rpcIsReq(pHead->msgType) ) {
rpcMsg.handle = pConn;
rpcAddRef(pRpc); // add the refCount for requests
if (rpcMsg.contLen > 0) {
rpcMsg.handle = pConn;
rpcAddRef(pRpc); // add the refCount for requests
// start the progress timer to monitor the response from server app
if (pConn->connType != RPC_CONN_TCPS)
pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);
// start the progress timer to monitor the response from server app
if (pConn->connType != RPC_CONN_TCPS)
pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);
// notify the server app
(*(pRpc->cfp))(&rpcMsg, NULL);
// notify the server app
(*(pRpc->cfp))(&rpcMsg, NULL);
} else {
tDebug("%s, message body is empty, ignore", pConn->info);
rpcFreeCont(rpcMsg.pCont);
}
} else {
// it's a response
SRpcReqContext *pContext = pConn->pContext;
@ -419,7 +419,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
tError("%s %p TCP malloc(size:%d) fail", pThreadObj->label, pFdObj->thandle, msgLen);
return -1;
} else {
// tDebug("malloc mem: %p", buffer);
tDebug("TCP malloc mem: %p", buffer);
}
msg = buffer + tsRpcOverhead;
@ -212,7 +212,7 @@ static void *taosRecvUdpData(void *param) {
tError("%s failed to allocate memory, size:%ld", pConn->label, dataLen);
continue;
} else {
// tTrace("malloc mem: %p", tmsg);
tDebug("UDP malloc mem: %p", tmsg);
}
tmsg += tsRpcOverhead; // overhead for SRpcReqContext
@ -29,7 +29,7 @@
#include "tutil.h"
#include "ttime.h"
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".h"};
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};
static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type);
static void tsdbDestroyFile(SFile *pFile);
@ -768,7 +768,8 @@ static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter) {
SSubmitBlk *pBlock = pIter->pBlock;
if (pBlock == NULL) return NULL;
pBlock->len = htonl(pBlock->len);
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
@ -776,11 +777,11 @@ static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter) {
pBlock->sversion = htonl(pBlock->sversion);
pBlock->padding = htonl(pBlock->padding);
pIter->len = pIter->len + sizeof(SSubmitBlk) + pBlock->len;
pIter->len = pIter->len + sizeof(SSubmitBlk) + pBlock->dataLen;
if (pIter->len >= pIter->totalLen) {
pIter->pBlock = NULL;
} else {
pIter->pBlock = (SSubmitBlk *)((char *)pBlock + pBlock->len + sizeof(SSubmitBlk));
pIter->pBlock = (SSubmitBlk *)((char *)pBlock + pBlock->dataLen + sizeof(SSubmitBlk));
}
return pBlock;
@ -832,10 +833,10 @@ _err:
}
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) {
if (pBlock->len <= 0) return -1;
pIter->totalLen = pBlock->len;
if (pBlock->dataLen <= 0) return -1;
pIter->totalLen = pBlock->dataLen;
pIter->len = 0;
pIter->row = (SDataRow)(pBlock->data);
pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen);
return 0;
}
@ -111,7 +111,7 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
ASSERT(pTableData->numOfRows == tSkipListGetSize(pTableData->pData));
}
tsdbDebug("vgId:%d a row is inserted to table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
tsdbTrace("vgId:%d a row is inserted to table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), key);
return 0;
@ -443,12 +443,14 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tdKVStoreEndCommit(pMeta->pStore);
goto _err;
}
} else if (pAct->act == TSDB_DROP_META) {
if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tdKVStoreEndCommit(pMeta->pStore);
goto _err;
}
} else {
@ -480,13 +480,11 @@ int tsdbUpdateTable(STsdbRepo *pRepo, STable *pTable, STableCfg *pCfg) {
bool changed = false;
STsdbMeta *pMeta = pRepo->tsdbMeta;
if (pTable->type == TSDB_SUPER_TABLE) {
if (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema)) {
if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) {
tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
if ((pTable->type == TSDB_SUPER_TABLE) && (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema))) {
if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) {
tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
changed = true;
}
@ -552,13 +550,13 @@ int tsdbUnlockRepoMeta(STsdbRepo *pRepo) {
}
void tsdbRefTable(STable *pTable) {
int16_t ref = T_REF_INC(pTable);
int32_t ref = T_REF_INC(pTable);
UNUSED(ref);
// tsdbDebug("ref table %"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
}
void tsdbUnRefTable(STable *pTable) {
int16_t ref = T_REF_DEC(pTable);
int32_t ref = T_REF_DEC(pTable);
tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
if (ref == 0) {
@ -598,7 +596,7 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
return -1;
}
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is restored from file", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is restored from file", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
return 0;
}
@ -799,7 +797,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, tsdbGetTableSchema(pTable));
}
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
return 0;
@ -1215,7 +1213,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) {
while (tSkipListIterNext(pIter)) {
STable *tTable = *(STable **)SL_GET_NODE_DATA(tSkipListIterGet(pIter));
ASSERT(TABLE_TYPE(tTable) == TSDB_CHILD_TABLE);
pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, pTable);
pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, tTable);
}
tSkipListDestroyIter(pIter);
@ -1254,4 +1252,4 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {
}
return 0;
}
}
@ -121,18 +121,19 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
TSDB_FILE_HEAD_SIZE, pHelper->files.headF.fname, pHelper->files.nHeadF.fname, strerror(errno));
errno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
// Create and open .l file if should
if (tsdbShouldCreateNewLast(pHelper)) {
if (tsdbOpenFile(&(pHelper->files.nLastF), O_WRONLY | O_CREAT) < 0) goto _err;
if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE)
if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
TSDB_FILE_HEAD_SIZE, pHelper->files.lastF.fname, pHelper->files.nLastF.fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
}
} else {
if (tsdbOpenFile(&(pHelper->files.dataF), O_RDONLY) < 0) goto _err;
@ -148,7 +148,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
pQueryHandle->cur.fid = -1;
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
pQueryHandle->checkFiles = true;
pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qinfo = qinfo;
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
@ -475,11 +475,15 @@ static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSK
}
static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlocks, int32_t type) {
// todo check open file failed
SFileGroup* fileGroup = pQueryHandle->pFileGroup;
assert(fileGroup->files[TSDB_FILE_TYPE_HEAD].fname > 0);
tsdbSetAndOpenHelperFile(&pQueryHandle->rhelper, fileGroup);
int32_t code = tsdbSetAndOpenHelperFile(&pQueryHandle->rhelper, fileGroup);
//open file failed, return error code to client
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// load all the comp offset value for all tables in this file
*numOfBlocks = 0;
@ -538,17 +542,12 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
return TSDB_CODE_SUCCESS;
}
static SDataBlockInfo getTrueDataBlockInfo(STableCheckInfo* pCheckInfo, SCompBlock* pBlock) {
SDataBlockInfo info = {
.window = {.skey = pBlock->keyFirst, .ekey = pBlock->keyLast},
.numOfCols = pBlock->numOfCols,
.rows = pBlock->numOfRows,
.tid = pCheckInfo->tableId.tid,
.uid = pCheckInfo->tableId.uid,
};
return info;
}
#define GET_FILE_DATA_BLOCK_INFO(_checkInfo, _block) \
((SDataBlockInfo){.window = {.skey = (_block)->keyFirst, .ekey = (_block)->keyLast}, \
.numOfCols = (_block)->numOfCols, \
.rows = (_block)->numOfRows, \
.tid = (_checkInfo)->tableId.tid, \
.uid = (_checkInfo)->tableId.uid})
static SArray* getColumnIdList(STsdbQueryHandle* pQueryHandle) {
size_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
@ -593,8 +592,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
if (pCheckInfo->pDataCols == NULL) {
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
// TODO
pCheckInfo->pDataCols =
tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
}
tdInitDataCols(pCheckInfo->pDataCols, tsdbGetTableSchema(pCheckInfo->pTableObj));
@ -623,7 +621,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){
SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
SDataRow row = getSDataRowInTableMem(pCheckInfo);
@ -943,7 +941,7 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa) {
SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo blockInfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
SDataBlockInfo blockInfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
initTableMemIterator(pQueryHandle, pCheckInfo);
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
@ -1319,8 +1317,8 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
sup.numOfTables = numOfQualTables;
SLoserTreeInfo* pTree = NULL;
SLoserTreeInfo* pTree = NULL;
uint8_t ret = tLoserTreeCreate(&pTree, sup.numOfTables, &sup, dataBlockOrderCompar);
if (ret != TSDB_CODE_SUCCESS) {
cleanBlockOrderSupporter(&sup, numOfTables);
@ -1359,16 +1357,18 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
}
|
||||
|
||||
// todo opt for only one table case
|
||||
static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
|
||||
static int32_t getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle, bool* exists) {
|
||||
pQueryHandle->numOfBlocks = 0;
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
int32_t numOfBlocks = 0;
|
||||
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
|
||||
while ((pQueryHandle->pFileGroup = tsdbGetFileGroupNext(&pQueryHandle->fileIter)) != NULL) {
|
||||
int32_t type = ASCENDING_TRAVERSE(pQueryHandle->order)? QUERY_RANGE_GREATER_EQUAL:QUERY_RANGE_LESS_EQUAL;
|
||||
if (getFileCompInfo(pQueryHandle, &numOfBlocks, type) != TSDB_CODE_SUCCESS) {
|
||||
if ((code = getFileCompInfo(pQueryHandle, &numOfBlocks, type)) != TSDB_CODE_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1393,20 +1393,25 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
|
|||
|
||||
// no data in file anymore
|
||||
if (pQueryHandle->numOfBlocks <= 0) {
|
||||
assert(pQueryHandle->pFileGroup == NULL);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
assert(pQueryHandle->pFileGroup == NULL);
|
||||
}
|
||||
|
||||
cur->fid = -1; // denote that there are no data in file anymore
|
||||
|
||||
return false;
|
||||
*exists = false;
|
||||
return code;
|
||||
}
|
||||
|
||||
cur->slot = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:pQueryHandle->numOfBlocks-1;
|
||||
cur->fid = pQueryHandle->pFileGroup->fileId;
|
||||
|
||||
STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot];
|
||||
return loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo);
|
||||
*exists = loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
|
||||
static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists) {
|
||||
STsdbFileH* pFileHandle = tsdbGetFile(pQueryHandle->pTsdb);
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
||||
|
@ -1419,7 +1424,7 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
|
|||
tsdbInitFileGroupIter(pFileHandle, &pQueryHandle->fileIter, pQueryHandle->order);
|
||||
tsdbSeekFileGroupIter(&pQueryHandle->fileIter, fid);
|
||||
|
||||
return getDataBlocksInFilesImpl(pQueryHandle);
|
||||
return getDataBlocksInFilesImpl(pQueryHandle, exists);
|
||||
} else {
|
||||
// check if current file block is all consumed
|
||||
STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot];
|
||||
|
@ -1430,7 +1435,7 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
|
|||
if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
|
||||
(cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
|
||||
// all data blocks in current file has been checked already, try next file if exists
|
||||
return getDataBlocksInFilesImpl(pQueryHandle);
|
||||
return getDataBlocksInFilesImpl(pQueryHandle, exists);
|
||||
} else {
|
||||
// next block of the same file
|
||||
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
|
||||
|
@ -1440,11 +1445,15 @@ static bool getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle) {
|
|||
cur->blockCompleted = false;
|
||||
|
||||
STableBlockInfo* pNext = &pQueryHandle->pDataBlockInfo[cur->slot];
|
||||
return loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo);
|
||||
*exists = loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
} else {
|
||||
handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
|
||||
return pQueryHandle->realNumOfRows > 0;
|
||||
*exists = pQueryHandle->realNumOfRows > 0;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1576,8 +1585,14 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
}
|
||||
|
||||
if (pQueryHandle->checkFiles) {
|
||||
if (getDataBlocksInFiles(pQueryHandle)) {
|
||||
return true;
|
||||
bool exists = true;
|
||||
int32_t code = getDataBlocksInFiles(pQueryHandle, &exists);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (exists) {
|
||||
return exists;
|
||||
}
|
||||
|
||||
pQueryHandle->activeIndex = 0;
|
||||
|
@ -1824,7 +1839,7 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
|
|||
if (pHandle->cur.mixBlock) {
|
||||
return pHandle->pColumns;
|
||||
} else {
|
||||
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlockInfo->compBlock);
|
||||
SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlockInfo->compBlock);
|
||||
assert(pHandle->realNumOfRows <= binfo.rows);
|
||||
|
||||
// data block has been loaded, todo extract method
|
||||
|
|
|
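The hunks above change getDataBlocksInFilesImpl and getDataBlocksInFiles from returning a bool into returning an int32_t status code, with data availability reported through a bool* out-parameter, so an I/O failure is no longer indistinguishable from "no more blocks". A minimal, self-contained sketch of that calling convention follows; the demo* names and error codes are invented for illustration and are not the TDengine functions themselves.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SUCCESS  0
#define DEMO_IO_ERROR 1

/* Return a status code; report data availability through *exists. */
static int32_t demoGetNextBlock(int32_t remaining, bool* exists) {
  if (remaining < 0) {        /* simulated read failure */
    *exists = false;
    return DEMO_IO_ERROR;
  }
  *exists = (remaining > 0);  /* availability is separate from the status code */
  return DEMO_SUCCESS;
}

int main(void) {
  bool exists = true;
  int32_t code = demoGetNextBlock(3, &exists);
  if (code != DEMO_SUCCESS) {
    return 1;                 /* propagate the failure instead of losing it in a bool */
  }
  printf("has block: %d\n", (int)exists);
  return 0;
}
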
@@ -22,7 +22,7 @@ typedef void (*_ref_fn_t)(const void* pObj);
#define T_REF_DECLARE() \
struct { \
int16_t val; \
int32_t val; \
} _ref;
#define T_REF_REGISTER_FUNC(s, e) \

@@ -31,7 +31,7 @@ typedef void (*_ref_fn_t)(const void* pObj);
_ref_fn_t end; \
} _ref_func = {.begin = (s), .end = (e)};
#define T_REF_INC(x) (atomic_add_fetch_16(&((x)->_ref.val), 1))
#define T_REF_INC(x) (atomic_add_fetch_32(&((x)->_ref.val), 1))
#define T_REF_INC_WITH_CB(x, p) \
do { \

@@ -41,11 +41,11 @@ typedef void (*_ref_fn_t)(const void* pObj);
} \
} while (0)
#define T_REF_DEC(x) (atomic_sub_fetch_16(&((x)->_ref.val), 1))
#define T_REF_DEC(x) (atomic_sub_fetch_32(&((x)->_ref.val), 1))
#define T_REF_DEC_WITH_CB(x, p) \
do { \
int32_t v = atomic_sub_fetch_16(&((x)->_ref.val), 1); \
int32_t v = atomic_sub_fetch_32(&((x)->_ref.val), 1); \
if (v == 0 && (p)->_ref_func.end != NULL) { \
(p)->_ref_func.end((x)); \
} \

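The T_REF hunks above widen the embedded reference counter from int16_t to int32_t and switch the helpers from the 16-bit to the 32-bit atomic variants. The snippet below is only a rough equivalent of that counting pattern built on C11 <stdatomic.h>; the real macros use TDengine's own atomic_add_fetch_32/atomic_sub_fetch_32 wrappers and a destructor callback registered via T_REF_REGISTER_FUNC.

#include <stdatomic.h>
#include <stdio.h>

typedef struct SDemoObj {
  atomic_int ref;   /* 32-bit counter, standing in for _ref.val */
} SDemoObj;

/* fetch_add/fetch_sub return the old value, so adjust to mimic add_fetch/sub_fetch. */
static int demoRefInc(SDemoObj* p) { return atomic_fetch_add(&p->ref, 1) + 1; }
static int demoRefDec(SDemoObj* p) { return atomic_fetch_sub(&p->ref, 1) - 1; }

int main(void) {
  SDemoObj obj;
  atomic_init(&obj.ref, 1);   /* object starts with one reference */

  demoRefInc(&obj);           /* acquire */
  demoRefDec(&obj);           /* release */
  if (demoRefDec(&obj) == 0) {
    printf("last reference dropped, destructor callback would run here\n");
  }
  return 0;
}
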
@@ -415,7 +415,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
}
*data = NULL;
int16_t ref = T_REF_DEC(pNode);
int32_t ref = T_REF_DEC(pNode);
uDebug("%p data released, refcnt:%d", pNode, ref);
if (_remove) {

@@ -259,6 +259,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
}
taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
uDebug("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname);
return 0;
}

@@ -292,6 +293,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2);
taosHashRemove(pStore->map, (void *)(&uid), sizeof(uid));
uDebug("drop uid %" PRIu64 " from KV store %s", uid, pStore->fname);
return 0;
}

@@ -68,10 +68,15 @@ void taosCloseQueue(taos_queue param) {
if (param == NULL) return;
STaosQueue *queue = (STaosQueue *)param;
STaosQnode *pTemp;
STaosQset *qset;
pthread_mutex_lock(&queue->mutex);
STaosQnode *pNode = queue->head;
queue->head = NULL;
qset = queue->qset;
pthread_mutex_unlock(&queue->mutex);
if (queue->qset) taosRemoveFromQset(queue->qset, queue);
if (queue->qset) taosRemoveFromQset(qset, queue);
pthread_mutex_lock(&queue->mutex);

@@ -95,7 +100,7 @@ void *taosAllocateQitem(int size) {
void taosFreeQitem(void *param) {
if (param == NULL) return;
uDebug("item:%p is freed", param);
uTrace("item:%p is freed", param);
char *temp = (char *)param;
temp -= sizeof(STaosQnode);
free(temp);

@@ -119,7 +124,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) {
queue->numOfItems++;
if (queue->qset) atomic_add_fetch_32(&queue->qset->numOfItems, 1);
uDebug("item:%p is put into queue:%p, type:%d items:%d", item, queue, type, queue->numOfItems);
uTrace("item:%p is put into queue:%p, type:%d items:%d", item, queue, type, queue->numOfItems);
pthread_mutex_unlock(&queue->mutex);

@@ -201,7 +206,7 @@ int taosGetQitem(taos_qall param, int *type, void **pitem) {
*pitem = pNode->item;
*type = pNode->type;
num = 1;
uDebug("item:%p is fetched, type:%d", *pitem, *type);
uTrace("item:%p is fetched, type:%d", *pitem, *type);
}
return num;

@@ -339,7 +344,7 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand
queue->numOfItems--;
atomic_sub_fetch_32(&qset->numOfItems, 1);
code = 1;
uDebug("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
}
pthread_mutex_unlock(&queue->mutex);

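In the taosCloseQueue hunk above, queue->qset is copied into a local variable while the mutex is held, and the later taosRemoveFromQset call passes that local copy rather than re-reading the field after the lock was dropped. A small sketch of that lock-then-snapshot idiom is below; DemoQueue and demoDetach are hypothetical names, not the tqueue implementation.

#include <pthread.h>
#include <stdio.h>

typedef struct DemoQueue {
  pthread_mutex_t mutex;
  void*           owner;   /* shared field, analogous to queue->qset */
} DemoQueue;

static void demoDetach(DemoQueue* q) {
  void* owner;

  pthread_mutex_lock(&q->mutex);
  owner = q->owner;        /* snapshot the pointer while holding the lock */
  q->owner = NULL;
  pthread_mutex_unlock(&q->mutex);

  if (owner != NULL) {     /* act on the snapshot, not on a second unlocked read */
    printf("detached from %p\n", owner);
  }
}

int main(void) {
  static int resource;     /* stands in for the qset object */
  DemoQueue q;
  pthread_mutex_init(&q.mutex, NULL);
  q.owner = &resource;
  demoDetach(&q);
  pthread_mutex_destroy(&q.mutex);
  return 0;
}
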
@@ -60,7 +60,7 @@ int64_t user_mktime64(const unsigned int year0, const unsigned int mon0,
// year*365 - 719499)*24 + hour)*60 + min)*60 + sec);
int64_t res;
res = 367*((int64_t)mon)/12;
res += year/4 - year/100 + year/400 + day + year*365 - 719499;
res += year/4 - year/100 + year/400 + day + ((int64_t)year)*365 - 719499;
res = res*24;
res = ((res + hour) * 60 + min) * 60 + sec;

@@ -123,9 +123,8 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
char tsdbDir[TSDB_FILENAME_LEN] = {0};
sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId);
code = tsdbCreateRepo(tsdbDir, &tsdbCfg);
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
if (tsdbCreateRepo(tsdbDir, &tsdbCfg) < 0) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
return TSDB_CODE_VND_INIT_FAILED;
}

@@ -601,10 +600,11 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
content = calloc(1, maxLen + 1);
if (content == NULL) goto PARSE_OVER;
int len = fread(content, 1, maxLen, fp);
int len = fread(content, 1, maxLen, fp);
if (len <= 0) {
vError("vgId:%d, failed to read vnode cfg, content is null", pVnode->vgId);
free(content);
fclose(fp);
return errno;
}

@@ -649,7 +649,7 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
pVnode->tsdbCfg.maxTables = maxTables->valueint;
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
vError("vgId:%d, failed to read vnode cfg, daysPerFile not found", pVnode->vgId);
goto PARSE_OVER;

@@ -137,7 +137,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
}
if (pQInfo != NULL) {
vDebug("vgId:%d, QInfo:%p, do qTableQuery", pVnode->vgId, pQInfo);
qTableQuery(pQInfo, vnodeRelease, pVnode); // do execute query
}

@@ -94,7 +94,7 @@ static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pR
// save insert result into item
vDebug("vgId:%d, submit msg is processed", pVnode->vgId);
vTrace("vgId:%d, submit msg is processed", pVnode->vgId);
pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);

@@ -11,7 +11,7 @@
4. pip install ../src/connector/python/linux/python2 ; pip3 install
../src/connector/python/linux/python3
5. pip install numpy; pip3 install numpy
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)
> Note: Both Python2 and Python3 are currently supported by the Python test
> framework. Since Python2 is no longer officially supported by Python Software

@@ -77,11 +77,7 @@ class TDTestCase:
# join queries
tdSql.query(
"select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
tdSql.checkRows(6)
tdSql.query(
"select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id order by ts desc")
tdSql.checkColumnSorted(0, "desc")
tdSql.checkRows(6)
tdSql.error(
"select ts, pressure, temperature, id, dscrption from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")

@@ -16,6 +16,7 @@ import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np

class TDTestCase:

@@ -26,6 +27,46 @@ class TDTestCase:
self.rowNum = 10
self.ts = 1537146000000
def checkColumnSorted(self, col, order):
frame = inspect.stack()[1]
callerModule = inspect.getmodule(frame[0])
callerFilename = callerModule.__file__
if col < 0:
tdLog.exit(
"%s failed: sql:%s, col:%d is smaller than zero" %
(callerFilename, tdSql.sql, col))
if col > tdSql.queryCols:
tdLog.exit(
"%s failed: sql:%s, col:%d is larger than queryCols:%d" %
(callerFilename, tdSql.sql, col, tdSql.queryCols))
matrix = np.array(tdSql.queryResult)
list = matrix[:, 0]
if order == "" or order.upper() == "ASC":
if all(sorted(list) == list):
tdLog.info(
"sql:%s, column :%d is sorted in accending order as expected" %
(tdSql.sql, col))
else:
tdLog.exit(
"%s failed: sql:%s, col:%d is not sorted in accesnind order" %
(callerFilename, tdSql.sql, col))
elif order.upper() == "DESC":
if all(sorted(list, reverse=True) == list):
tdLog.info(
"sql:%s, column :%d is sorted in decending order as expected" %
(tdSql.sql, col))
else:
tdLog.exit(
"%s failed: sql:%s, col:%d is not sorted in decending order" %
(callerFilename, tdSql.sql, col))
else:
tdLog.exit(
"%s failed: sql:%s, the order provided for col:%d is not correct" %
(callerFilename, tdSql.sql, col))
def run(self):
tdSql.prepare()

@@ -49,11 +90,11 @@ class TDTestCase:
print("======= step 2: verify order for each column =========")
# sort for timestamp in asc order
tdSql.query("select * from st order by ts asc")
tdSql.checkColumnSorted(0, "asc")
self.checkColumnSorted(0, "asc")
# sort for timestamp in desc order
tdSql.query("select * from st order by ts desc")
tdSql.checkColumnSorted(0, "desc")
self.checkColumnSorted(0, "desc")
for i in range(1, 10):
tdSql.error("select * from st order by tbcol%d" % i)

@@ -63,17 +104,17 @@ class TDTestCase:
tdSql.query(
"select avg(tbcol1) from st group by tagcol%d order by tagcol%d" %
(i, i))
tdSql.checkColumnSorted(1, "")
self.checkColumnSorted(1, "")
tdSql.query(
"select avg(tbcol1) from st group by tagcol%d order by tagcol%d asc" %
(i, i))
tdSql.checkColumnSorted(1, "asc")
self.checkColumnSorted(1, "asc")
tdSql.query(
"select avg(tbcol1) from st group by tagcol%d order by tagcol%d desc" %
(i, i))
tdSql.checkColumnSorted(1, "desc")
self.checkColumnSorted(1, "desc")
def stop(self):
tdSql.close()

@@ -43,7 +43,7 @@ python3 ./test.py -f tag_lite/commit.py
python3 ./test.py -f tag_lite/create.py
python3 ./test.py -f tag_lite/datatype.py
python3 ./test.py -f tag_lite/datatype-without-alter.py
# python3 ./test.py -f tag_lite/delete.py
python3 ./test.py -f tag_lite/delete.py
python3 ./test.py -f tag_lite/double.py
python3 ./test.py -f tag_lite/float.py
python3 ./test.py -f tag_lite/int_binary.py

@@ -134,9 +134,10 @@ python3 ./test.py -f table/del_stable.py
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterAllIntTypes.py
python3 ./test.py -f query/filterFloatAndDouble.py
python3 ./test.py -f query/filterOtherTypes.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/querySort.py

#stream
python3 ./test.py -f stream/stream1.py
python3 ./test.py -f stream/stream2.py

@@ -8,24 +8,8 @@ python3 ./test.py $1 -s && sleep 1
# insert
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/int.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/float.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bigint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/bool.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/double.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/smallint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/tinyint.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/binary.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/date.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/nchar.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f insert/multi.py

@@ -42,18 +26,6 @@ python3 ./test.py $1 -s && sleep 1
# import
python3 ./test.py $1 -f import_merge/importDataLastSub.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importHead.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importLastT.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importSpan.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTail.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importTRestart.py
python3 ./test.py $1 -s && sleep 1
python3 ./test.py $1 -f import_merge/importInsertThenImport.py
python3 ./test.py $1 -s && sleep 1

#tag
python3 ./test.py $1 -f tag_lite/filter.py

@@ -17,7 +17,6 @@ import time
import datetime
import inspect
from util.log import *
import numpy as np

class TDSql:

@@ -199,47 +198,7 @@ class TDSql:
"%s failed: sql:%s, affectedRows:%d != expect:%d" %
(callerFilename, self.sql, self.affectedRows, expectAffectedRows))
tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
(self.sql, self.affectedRows, expectAffectedRows))
def checkColumnSorted(self, col, order):
frame = inspect.stack()[1]
callerModule = inspect.getmodule(frame[0])
callerFilename = callerModule.__file__
if col < 0:
tdLog.exit(
"%s failed: sql:%s, col:%d is smaller than zero" %
(callerFilename, self.sql, col))
if col > self.queryCols:
tdLog.exit(
"%s failed: sql:%s, col:%d is larger than queryCols:%d" %
(callerFilename, self.sql, col, self.queryCols))
matrix = np.array(self.queryResult)
list = matrix[:, 0]
if order == "" or order.upper() == "ASC":
if all(sorted(list) == list):
tdLog.info(
"sql:%s, column :%d is sorted in accending order as expected" %
(self.sql, col))
else:
tdLog.exit(
"%s failed: sql:%s, col:%d is not sorted in accesnind order" %
(callerFilename, self.sql, col))
elif order.upper() == "DESC":
if all(sorted(list, reverse=True) == list):
tdLog.info(
"sql:%s, column :%d is sorted in decending order as expected" %
(self.sql, col))
else:
tdLog.exit(
"%s failed: sql:%s, col:%d is not sorted in decending order" %
(callerFilename, self.sql, col))
else:
tdLog.exit(
"%s failed: sql:%s, the order provided for col:%d is not correct" %
(callerFilename, self.sql, col))
(self.sql, self.affectedRows, expectAffectedRows))

tdSql = TDSql()

@@ -1,21 +1,20 @@
run general/cache/new_metrics.sim
run general/column/commit.sim
run general/compress/compress.sim
run general/compute/interval.sim
run general/db/basic4.sim
run general/field/binary.sim
run general/http/restful_insert.sim
run general/http/restful_full.sim
run general/import/commit.sim
run general/import/replica1.sim
run general/parser/auto_create_tb_drop_tb.sim
run general/parser/binary_escapeCharacter.sim
run general/parser/select_from_cache_disk.sim
run general/parser/null_char.sim
run general/parser/alter.sim
run general/stable/vnode3.sim
run general/table/autocreate.sim
run general/table/fill.sim
run general/table/vgroup.sim
run general/tag/filter.sim
run general/table/vgroup.sim
run general/user/authority.sim
run general/user/pass_alter.sim
run general/vector/metrics_mix.sim
run general/vector/table_field.sim
run general/user/authority.sim
run general/tag/set.sim
run general/table/delete_writing.sim
run general/stable/disk.sim

@@ -232,24 +232,3 @@ cd ../../../debug; make
./test.sh -f general/vector/table_mix.sim
./test.sh -f general/vector/table_query.sim
./test.sh -f general/vector/table_time.sim

./test.sh -f unique/account/account_create.sim
./test.sh -f unique/account/account_delete.sim
./test.sh -f unique/account/account_len.sim
./test.sh -f unique/account/authority.sim
./test.sh -f unique/account/basic.sim
./test.sh -f unique/account/paras.sim
./test.sh -f unique/account/pass_alter.sim
./test.sh -f unique/account/pass_len.sim
./test.sh -f unique/account/usage.sim
./test.sh -f unique/account/user_create.sim
./test.sh -f unique/account/user_len.sim

./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/dnode/balance1.sim
./test.sh -f unique/dnode/balance2.sim
./test.sh -f unique/stable/dnode3.sim
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/vnode/many.sim

@@ -102,9 +102,10 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_createErrData_online.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_delDir.sim
./test.sh -f unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim

@@ -1,10 +1,10 @@
##unsupport run general/alter/cached_schema_after_alter.sim
unsupport run general/alter/count.sim
unsupport run general/alter/import.sim
run general/alter/count.sim
run general/alter/import.sim
##unsupport run general/alter/insert1.sim
unsupport run general/alter/insert2.sim
unsupport run general/alter/metrics.sim
unsupport run general/alter/table.sim
run general/alter/insert2.sim
run general/alter/metrics.sim
run general/alter/table.sim
run general/cache/new_metrics.sim
run general/cache/restart_metrics.sim
run general/cache/restart_table.sim

@@ -86,14 +86,14 @@ run general/insert/query_block2_file.sim
run general/insert/query_file_memory.sim
run general/insert/query_multi_file.sim
run general/insert/tcp.sim
##unsupport run general/parser/alter.sim
run general/parser/alter.sim
run general/parser/alter1.sim
run general/parser/alter_stable.sim
run general/parser/auto_create_tb.sim
run general/parser/auto_create_tb_drop_tb.sim
run general/parser/col_arithmetic_operation.sim
run general/parser/columnValue.sim
#run general/parser/commit.sim
run general/parser/commit.sim
run general/parser/create_db.sim
run general/parser/create_mt.sim
run general/parser/create_tb.sim

@@ -106,7 +106,7 @@ run general/parser/first_last.sim
##unsupport run general/parser/import_file.sim
run general/parser/lastrow.sim
run general/parser/nchar.sim
##unsupport run general/parser/null_char.sim
run general/parser/null_char.sim
run general/parser/single_row_in_tb.sim
run general/parser/select_from_cache_disk.sim
run general/parser/limit.sim

@@ -132,7 +132,7 @@ run general/parser/groupby.sim
run general/parser/bug.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
##unsupport run general/parser/repeatAlter.sim
run general/parser/repeatAlter.sim
##unsupport run general/parser/slimit_alter_tags.sim
##unsupport run general/parser/stream_on_sys.sim
run general/parser/stream.sim

@@ -142,6 +142,8 @@ run general/stable/dnode3.sim
run general/stable/metrics.sim
run general/stable/values.sim
run general/stable/vnode3.sim
run general/stable/refcount.sim
run general/stable/show.sim
run general/table/autocreate.sim
run general/table/basic1.sim
run general/table/basic2.sim

@@ -112,7 +112,7 @@ echo "numOfLogLines 100000000" >> $TAOS_CFG
echo "dDebugFlag 135" >> $TAOS_CFG
echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 143" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "httpDebugFlag 135" >> $TAOS_CFG

@@ -64,7 +64,7 @@ print ======== step7
$lastRows = $data00
print ======== loop Times $x

if $x < 2 then
if $x < 5 then
$x = $x + 1
goto loop
endi

@@ -75,7 +75,7 @@ print ======== step8
$lastRows = $data00
print ======== loop Times $x

if $x < 2 then
if $x < 5 then
$x = $x + 1
goto loop
endi