From 47665e5694884c2f9c856679fea5c377b7bc1bdf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 12 Jul 2020 23:52:55 +0800 Subject: [PATCH 01/14] [td-225] fix bug in error process. --- src/client/src/tscAsync.c | 45 +++++++++++++++--------------------- src/client/src/tscServer.c | 3 +-- src/client/src/tscSql.c | 13 +---------- src/client/src/tscSubquery.c | 8 ++++--- 4 files changed, 26 insertions(+), 43 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 41464aa660..17998e1981 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -420,15 +420,15 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + pRes->code = code; if (code != TSDB_CODE_SUCCESS) { - pRes->code = code; - tscQueueAsyncRes(pSql); - return; + tscError("%p ge tableMeta failed, code:%s", pSql, tstrerror(code)); + goto _error; + } else { + tscDebug("%p get tableMeta successfully", pSql); } - tscDebug("%p get tableMeta successfully", pSql); - if (pSql->pStream == NULL) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); @@ -453,11 +453,9 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); - if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) { - return; - } - - goto _error; + // tscProcessSql can add error into async res + tscProcessSql(pSql); + return; } else { // continue to process normal async query if (pCmd->parseFinished) { tscDebug("%p update table meta in local cache, continue to process sql and send corresponding query", pSql); @@ -481,26 +479,21 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; + } else if (code != TSDB_CODE_SUCCESS) { + goto _error; } - if (code == TSDB_CODE_SUCCESS) { - /* - * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks, - * and send the required submit block according to index value in supporter to server. - */ - pSql->fp = pSql->fetchFp; // restore the fp - if ((code = tscHandleInsertRetry(pSql)) == TSDB_CODE_SUCCESS) { - return; - } - } - + /* + * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks, + * and send the required submit block according to index value in supporter to server. + */ + pSql->fp = pSql->fetchFp; // restore the fp + tscHandleInsertRetry(pSql); } else {// in case of other query type, continue - if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) { - return; - } + tscProcessSql(pSql); } - goto _error; + return; } else { tscDebug("%p continue parse sql after get table meta", pSql); @@ -538,7 +531,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - if (code == TSDB_CODE_SUCCESS && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 88fcc3828e..1ae12aaf3d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -412,7 +412,7 @@ int tscProcessSql(SSqlObj *pSql) { return pSql->res.code; } } else if (pCmd->command < TSDB_SQL_LOCAL) { - pSql->ipList = tscMgmtIpSet; //? 
+ pSql->ipList = tscMgmtIpSet; } else { // local handler return (*tscProcessMsgRsp[pCmd->command])(pSql); } @@ -1372,7 +1372,6 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); pRes->code = TSDB_CODE_SUCCESS; - if (pRes->rspType == 0) { pRes->numOfRows = numOfRes; pRes->row = 0; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 6d75aef01f..9f422b2d76 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -487,19 +487,8 @@ static bool tscFreeQhandleInVnode(SSqlObj* pSql) { (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s", pSql, sqlCmd[pCmd->command]); + tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s, ", pSql, sqlCmd[pCmd->command]); tscProcessSql(pSql); - - // in case of sync model query, waits for response and then goes on -// if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) { -// sem_wait(&pSql->rspSem); - -// tscFreeSqlObj(pSql); -// tscDebug("%p sqlObj is freed by app", pSql); -// } else { - tscDebug("%p sqlObj will be freed while rsp received", pSql); -// } - return true; } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 5e2ba46fbc..dd4ed991ed 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1895,9 +1895,11 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql) { assert(pSupporter->index < pSupporter->pState->numOfTotal); STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, pSupporter->index); - pRes->code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); - if (pRes->code != TSDB_CODE_SUCCESS) { - return pRes->code; + int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); + + if ((pRes->code = code)!= TSDB_CODE_SUCCESS) { + tscQueueAsyncRes(pSql); + return code; // here the pSql may have been released already. } return tscProcessSql(pSql); From 411d034d29b5a705f5ce6341433f2c8d77e3b12c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Jul 2020 12:42:46 +0800 Subject: [PATCH 02/14] [td-225] fix bugs in cache. --- src/client/src/tscSql.c | 8 ++-- src/util/src/tcache.c | 103 ++++++++++++++++++++++++++-------------- 2 files changed, 72 insertions(+), 39 deletions(-) diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 9f422b2d76..874923aea7 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -481,10 +481,10 @@ static bool tscFreeQhandleInVnode(SSqlObj* pSql) { if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && !tscIsTwoStageSTableQuery(pQueryInfo, 0) && (pCmd->command == TSDB_SQL_SELECT || - pCmd->command == TSDB_SQL_SHOW || - pCmd->command == TSDB_SQL_RETRIEVE || - pCmd->command == TSDB_SQL_FETCH) && - (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) { + pCmd->command == TSDB_SQL_SHOW || + pCmd->command == TSDB_SQL_RETRIEVE || + pCmd->command == TSDB_SQL_FETCH) && + (pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s, ", pSql, sqlCmd[pCmd->command]); diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index d3c622633d..df63d567c7 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -413,57 +413,90 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { *data = NULL; // note: extend lifespan before dec ref count - if (pCacheObj->extendLifespan) { + bool inTrashCan = pNode->inTrashCan; + + if (pCacheObj->extendLifespan && (!inTrashCan)) { atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime); } - bool inTrashCan = pNode->inTrashCan; - uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1); + if (_remove) { + __cache_wr_lock(pCacheObj); - // NOTE: once refcount is decrease, pNode may be free by other thread immediately. - int32_t ref = T_REF_DEC(pNode); + // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. + int32_t ref = T_REF_DEC(pNode); + uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref); - if (inTrashCan) { - // Remove it if the ref count is 0. - // The ref count does not need to load and check again after lock acquired, since ref count can not be increased when - // the node is in trashcan. - if (ref == 0) { - __cache_wr_lock(pCacheObj); - assert(pNode->pTNodeHeader->pData == pNode); - taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); - __cache_unlock(pCacheObj); + /* + * If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users + * releasing this resources. + * + * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread + * that tries to do the same thing. + */ + if (pNode->inTrashCan) { + if (ref == 0) { + assert(pNode->pTNodeHeader->pData == pNode); + taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); + } + } else { + if (ref > 0) { + assert(pNode->pTNodeHeader == NULL); + taosCacheMoveToTrash(pCacheObj, pNode); + } else { + taosCacheReleaseNode(pCacheObj, pNode); + } } + __cache_unlock(pCacheObj); + } else { - assert(pNode->pTNodeHeader == NULL); + uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1); - if (_remove) { // not in trash can, but need to remove it - __cache_wr_lock(pCacheObj); + // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. + int32_t ref = T_REF_DEC(pNode); - /* - * If not referenced by other users. Otherwise move this node to trashcan wait for all users - * releasing this resources. - * - * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread - * that tries to do the same thing. - */ + if (inTrashCan) { + // Remove it if the ref count is 0. + // The ref count does not need to load and check again after lock acquired, since ref count can not be increased when + // the node is in trashcan. 
if (ref == 0) { - if (T_REF_VAL_GET(pNode) == 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } else { - taosCacheMoveToTrash(pCacheObj, pNode); - } + __cache_wr_lock(pCacheObj); + assert(pNode->pTNodeHeader->pData == pNode); + taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); + __cache_unlock(pCacheObj); } - __cache_unlock(pCacheObj); -// } else { // extend its life time -// if (pCacheObj->extendLifespan) { -// atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); -// uDebug("cache:%s data:%p extend life time to %"PRId64 " after release", pCacheObj->name, pNode->data, pNode->expireTime); -// } } } + +// else { +// if (_remove) { // not in trash can, but need to remove it +// __cache_wr_lock(pCacheObj); +// +// /* +// * If not referenced by other users. Otherwise move this node to trashcan wait for all users +// * releasing this resources. +// * +// * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread +// * that tries to do the same thing. +// */ +// if (ref == 0) { +// if (T_REF_VAL_GET(pNode) == 0) { +// taosCacheReleaseNode(pCacheObj, pNode); +// } else { +// taosCacheMoveToTrash(pCacheObj, pNode); +// } +// } else if (ref > 0) { +// if (!pNode->inTrashCan) { +// assert(pNode->pTNodeHeader == NULL); +// taosCacheMoveToTrash(pCacheObj, pNode); +// } +// } +// +// __cache_unlock(pCacheObj); +// } +// } } void taosCacheEmpty(SCacheObj *pCacheObj) { From f2d5fe86b0721eb9e362885d706f9bf11bf48272 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Jul 2020 12:43:32 +0800 Subject: [PATCH 03/14] [td-225] update the log. --- src/vnode/src/vnodeRead.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index ff58e219b0..7997abaeb0 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -110,6 +110,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (handle == NULL) { // failed to register qhandle pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; qDestroyQueryInfo(pQInfo); // destroy it directly + vError("vgId:%d QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void*) pQInfo, tstrerror(pRsp->code)); } else { assert(*handle == pQInfo); pRsp->qhandle = htobe64((uint64_t) pQInfo); @@ -125,12 +126,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } else { assert(pQInfo == NULL); } + if (handle != NULL) { + vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, register qhandle and return to app", vgId, *handle); + dnodePutItemIntoReadQueue(pVnode, *handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } - vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo); } else { assert(pCont != NULL); handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); @@ -138,12 +141,13 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); code = TSDB_CODE_QRY_INVALID_QHANDLE; } else { - vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, (void*) pCont); + vDebug("vgId:%d, QInfo:%p, dnode continue exec query", pVnode->vgId, (void*) pCont); code = TSDB_CODE_VND_ACTION_IN_PROGRESS; qTableQuery(*handle); // do execute query } qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } + return code; } From a8399213f2dd8131f64ed504f0f6f705b31d64a9 Mon Sep 17 00:00:00 2001 From: Haojun Liao 
Date: Mon, 13 Jul 2020 12:45:06 +0800 Subject: [PATCH 04/14] [td-225]opt query perf. --- src/client/src/tscFunctionImpl.c | 207 +++++++++++++++---------------- src/client/src/tscSQLParser.c | 4 +- src/query/inc/qExecutor.h | 1 + src/query/inc/tsqlfunction.h | 24 +--- src/query/src/qExecutor.c | 161 +++++++++++------------- 5 files changed, 176 insertions(+), 221 deletions(-) diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 06e5ff73bf..8e6878f449 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -330,10 +330,6 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } -bool stableQueryFunctChanged(int32_t funcId) { - return (aAggs[funcId].stableFuncId != funcId); -} - /** * the numOfRes should be kept, since it may be used later * and allow the ResultInfo to be re initialized @@ -361,7 +357,6 @@ static bool function_setup(SQLFunctionCtx *pCtx) { } memset(pCtx->aOutputBuf, 0, (size_t)pCtx->outputBytes); - initResultInfo(pResInfo); return true; } @@ -675,16 +670,16 @@ static void sum_func_second_merge(SQLFunctionCtx *pCtx) { } } -static int32_t precal_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { +static int32_t statisRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { return BLK_DATA_STATIS_NEEDED; } -static int32_t data_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { +static int32_t dataBlockRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { return BLK_DATA_ALL_NEEDED; } // todo: if column in current data block are null, opt for this case -static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { +static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { if (pCtx->order == TSDB_ORDER_DESC) { return BLK_DATA_NO_NEEDED; } @@ -697,7 +692,7 @@ static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, } } -static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { +static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { if (pCtx->order != pCtx->param[0].i64Key) { return BLK_DATA_NO_NEEDED; } @@ -709,34 +704,30 @@ static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, } } -static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { +static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { if (pCtx->order == TSDB_ORDER_DESC) { return BLK_DATA_NO_NEEDED; } - // result buffer has not been set yet. - return BLK_DATA_ALL_NEEDED; - //todo optimize the filter info -// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); -// if (pInfo->hasResult != DATA_SET_FLAG) { -// return BLK_DATA_ALL_NEEDED; -// } else { // data in current block is not earlier than current result -// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; -// } + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); + if (pInfo->hasResult != DATA_SET_FLAG) { + return BLK_DATA_ALL_NEEDED; + } else { // data in current block is not earlier than current result + return (pInfo->ts <= start) ? 
BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + } } -static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { +static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { if (pCtx->order != pCtx->param[0].i64Key) { return BLK_DATA_NO_NEEDED; } - return BLK_DATA_ALL_NEEDED; -// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); -// if (pInfo->hasResult != DATA_SET_FLAG) { -// return BLK_DATA_ALL_NEEDED; -// } else { -// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; -// } + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); + if (pInfo->hasResult != DATA_SET_FLAG) { + return BLK_DATA_ALL_NEEDED; + } else { + return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + } } ////////////////////////////////////////////////////////////////////////////////////////////// @@ -2123,55 +2114,6 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { tfree(pData); } -bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval) { - STopBotInfo *pTopBotInfo = (STopBotInfo *)GET_RES_INFO(pCtx)->interResultBuf; - - int32_t numOfExistsRes = pTopBotInfo->num; - - // required number of results are not reached, continue load data block - if (numOfExistsRes < pCtx->param[0].i64Key) { - return true; - } - - tValuePair *pRes = (tValuePair*) pTopBotInfo->res; - - if (functionId == TSDB_FUNC_TOP) { - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - return GET_INT8_VAL(maxval) > pRes[0].v.i64Key; - case TSDB_DATA_TYPE_SMALLINT: - return GET_INT16_VAL(maxval) > pRes[0].v.i64Key; - case TSDB_DATA_TYPE_INT: - return GET_INT32_VAL(maxval) > pRes[0].v.i64Key; - case TSDB_DATA_TYPE_BIGINT: - return GET_INT64_VAL(maxval) > pRes[0].v.i64Key; - case TSDB_DATA_TYPE_FLOAT: - return GET_FLOAT_VAL(maxval) > pRes[0].v.dKey; - case TSDB_DATA_TYPE_DOUBLE: - return GET_DOUBLE_VAL(maxval) > pRes[0].v.dKey; - default: - return true; - } - } else { - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - return GET_INT8_VAL(minval) < pRes[0].v.i64Key; - case TSDB_DATA_TYPE_SMALLINT: - return GET_INT16_VAL(minval) < pRes[0].v.i64Key; - case TSDB_DATA_TYPE_INT: - return GET_INT32_VAL(minval) < pRes[0].v.i64Key; - case TSDB_DATA_TYPE_BIGINT: - return GET_INT64_VAL(minval) < pRes[0].v.i64Key; - case TSDB_DATA_TYPE_FLOAT: - return GET_FLOAT_VAL(minval) < pRes[0].v.dKey; - case TSDB_DATA_TYPE_DOUBLE: - return GET_DOUBLE_VAL(minval) < pRes[0].v.dKey; - default: - return true; - } - } -} - /* * Parameters values: * 1. 
param[0]: maximum allowable results @@ -2182,15 +2124,62 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi */ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); - + // only the first_stage_merge is directly written data into final output buffer if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) { return (STopBotInfo*) pCtx->aOutputBuf; - } else { // during normal table query and super table at the secondary_stage, result is written to intermediate buffer + } else { // during normal table query and super table at the secondary_stage, result is written to intermediate buffer return pResInfo->interResultBuf; } } +bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval) { + STopBotInfo *pTopBotInfo = getTopBotOutputInfo(pCtx); + + // required number of results are not reached, continue load data block + if (pTopBotInfo->num < pCtx->param[0].i64Key) { + return true; + } + + tValuePair **pRes = (tValuePair**) pTopBotInfo->res; + + if (functionId == TSDB_FUNC_TOP) { + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + return GET_INT8_VAL(maxval) > pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_SMALLINT: + return GET_INT16_VAL(maxval) > pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_INT: + return GET_INT32_VAL(maxval) > pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_BIGINT: + return GET_INT64_VAL(maxval) > pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_FLOAT: + return GET_FLOAT_VAL(maxval) > pRes[0]->v.dKey; + case TSDB_DATA_TYPE_DOUBLE: + return GET_DOUBLE_VAL(maxval) > pRes[0]->v.dKey; + default: + return true; + } + } else { + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + return GET_INT8_VAL(minval) < pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_SMALLINT: + return GET_INT16_VAL(minval) < pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_INT: + return GET_INT32_VAL(minval) < pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_BIGINT: + return GET_INT64_VAL(minval) < pRes[0]->v.i64Key; + case TSDB_DATA_TYPE_FLOAT: + return GET_FLOAT_VAL(minval) < pRes[0]->v.dKey; + case TSDB_DATA_TYPE_DOUBLE: + return GET_DOUBLE_VAL(minval) < pRes[0]->v.dKey; + default: + return true; + } + } +} + /* * keep the intermediate results during scan data blocks in the format of: * +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+ @@ -3376,7 +3365,7 @@ static void spread_function(SQLFunctionCtx *pCtx) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); SSpreadInfo *pInfo = pResInfo->interResultBuf; - int32_t numOfElems = pCtx->size; + int32_t numOfElems = 0; // todo : opt with pre-calculated result // column missing cause the hasNull to be true @@ -4412,7 +4401,7 @@ static void sumrate_finalizer(SQLFunctionCtx *pCtx) { * e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last... 
* */ -int32_t funcCompatDefList[] = { +int32_t functionCompatList[] = { // count, sum, avg, min, max, stddev, percentile, apercentile, first, last 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z @@ -4451,7 +4440,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, sum_func_merge, sum_func_second_merge, - precal_req_load_info, + statisRequired, }, { // 2 @@ -4466,7 +4455,7 @@ SQLAggFuncElem aAggs[] = {{ avg_finalizer, avg_func_merge, avg_func_second_merge, - precal_req_load_info, + statisRequired, }, { // 3 @@ -4481,7 +4470,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, min_func_merge, min_func_second_merge, - precal_req_load_info, + statisRequired, }, { // 4 @@ -4496,7 +4485,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, max_func_merge, max_func_second_merge, - precal_req_load_info, + statisRequired, }, { // 5 @@ -4511,7 +4500,7 @@ SQLAggFuncElem aAggs[] = {{ stddev_finalizer, noop1, noop1, - data_req_load_info, + dataBlockRequired, }, { // 6 @@ -4526,7 +4515,7 @@ SQLAggFuncElem aAggs[] = {{ percentile_finalizer, noop1, noop1, - data_req_load_info, + dataBlockRequired, }, { // 7 @@ -4541,7 +4530,7 @@ SQLAggFuncElem aAggs[] = {{ apercentile_finalizer, apercentile_func_merge, apercentile_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 8 @@ -4556,7 +4545,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, noop1, noop1, - first_data_req_info, + firstFuncRequired, }, { // 9 @@ -4571,7 +4560,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, noop1, noop1, - last_data_req_info, + lastFuncRequired, }, { // 10 @@ -4587,7 +4576,7 @@ SQLAggFuncElem aAggs[] = {{ last_row_finalizer, noop1, last_dist_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 11 @@ -4603,7 +4592,7 @@ SQLAggFuncElem aAggs[] = {{ top_bottom_func_finalizer, top_func_merge, top_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 12 @@ -4619,7 +4608,7 @@ SQLAggFuncElem aAggs[] = {{ top_bottom_func_finalizer, bottom_func_merge, bottom_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 13 @@ -4649,7 +4638,7 @@ SQLAggFuncElem aAggs[] = {{ twa_function_finalizer, twa_func_merge, twa_function_copy, - data_req_load_info, + dataBlockRequired, }, { // 15 @@ -4664,7 +4653,7 @@ SQLAggFuncElem aAggs[] = {{ leastsquares_finalizer, noop1, noop1, - data_req_load_info, + dataBlockRequired, }, { // 16 @@ -4694,7 +4683,7 @@ SQLAggFuncElem aAggs[] = {{ doFinalizer, copy_function, copy_function, - data_req_load_info, + dataBlockRequired, }, { // 18 @@ -4724,7 +4713,7 @@ SQLAggFuncElem aAggs[] = {{ ts_comp_finalize, copy_function, copy_function, - data_req_load_info, + dataBlockRequired, }, { // 20 @@ -4754,7 +4743,7 @@ SQLAggFuncElem aAggs[] = {{ doFinalizer, copy_function, copy_function, - data_req_load_info, + dataBlockRequired, }, { // 22, multi-output, tag function has only one result @@ -4784,7 +4773,7 @@ SQLAggFuncElem aAggs[] = {{ doFinalizer, copy_function, copy_function, - data_req_load_info, + dataBlockRequired, }, { // 24 @@ -4799,7 +4788,7 @@ SQLAggFuncElem aAggs[] = {{ doFinalizer, noop1, noop1, - data_req_load_info, + dataBlockRequired, }, // distributed version used in two-stage aggregation processes { @@ -4815,7 +4804,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, first_dist_func_merge, first_dist_func_second_merge, - first_dist_data_req_info, + firstDistFuncRequired, }, { // 26 @@ -4830,7 +4819,7 @@ SQLAggFuncElem aAggs[] = {{ function_finalizer, last_dist_func_merge, 
last_dist_func_second_merge, - last_dist_data_req_info, + lastDistFuncRequired, }, { // 27 @@ -4845,7 +4834,7 @@ SQLAggFuncElem aAggs[] = {{ doFinalizer, noop1, copy_function, - data_req_load_info, + dataBlockRequired, }, { // 28 @@ -4860,7 +4849,7 @@ SQLAggFuncElem aAggs[] = {{ rate_finalizer, rate_func_merge, rate_func_copy, - data_req_load_info, + dataBlockRequired, }, { // 29 @@ -4875,7 +4864,7 @@ SQLAggFuncElem aAggs[] = {{ rate_finalizer, rate_func_merge, rate_func_copy, - data_req_load_info, + dataBlockRequired, }, { // 30 @@ -4890,7 +4879,7 @@ SQLAggFuncElem aAggs[] = {{ sumrate_finalizer, sumrate_func_merge, sumrate_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 31 @@ -4905,7 +4894,7 @@ SQLAggFuncElem aAggs[] = {{ sumrate_finalizer, sumrate_func_merge, sumrate_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 32 @@ -4920,7 +4909,7 @@ SQLAggFuncElem aAggs[] = {{ sumrate_finalizer, sumrate_func_merge, sumrate_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 33 @@ -4935,7 +4924,7 @@ SQLAggFuncElem aAggs[] = {{ sumrate_finalizer, sumrate_func_merge, sumrate_func_second_merge, - data_req_load_info, + dataBlockRequired, }, { // 34 @@ -4950,5 +4939,5 @@ SQLAggFuncElem aAggs[] = {{ noop1, noop1, noop1, - data_req_load_info, + dataBlockRequired, }}; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 44d10ec2c4..e65d99fb5d 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2471,7 +2471,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) { startIdx++; } - int32_t factor = funcCompatDefList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; + int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; // diff function cannot be executed with other function // arithmetic function can be executed with other arithmetic functions @@ -2489,7 +2489,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) { continue; } - if (funcCompatDefList[functionId] != factor) { + if (functionCompatList[functionId] != factor) { return false; } } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 1faad6b141..cdf5f9612c 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -121,6 +121,7 @@ typedef struct SQueryCostInfo { uint32_t loadBlockStatis; uint32_t discardBlocks; uint64_t elapsedTime; + uint64_t ioTime; uint64_t computTime; } SQueryCostInfo; diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 0c69bc02d3..6a4b9874d7 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -224,25 +224,14 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI #define IS_SINGLEOUTPUT(x) (((x)&TSDB_FUNCSTATE_SO) != 0) #define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0) -/* - * the status of one block, used in metric query. 
all blocks are mixed together, - * we need the status to decide if one block is a first/end/inter block of one meter - */ -enum { - BLK_FILE_BLOCK = 0x1, - BLK_BLOCK_LOADED = 0x2, - BLK_CACHE_BLOCK = 0x4, // in case of cache block, block must be loaded -}; - /* determine the real data need to calculated the result */ enum { - BLK_DATA_NO_NEEDED = 0x0, + BLK_DATA_NO_NEEDED = 0x0, BLK_DATA_STATIS_NEEDED = 0x1, - BLK_DATA_ALL_NEEDED = 0x3, + BLK_DATA_ALL_NEEDED = 0x3, + BLK_DATA_DISCARD = 0x4, // discard current data block since it is not qualified for filter }; -#define SET_DATA_BLOCK_NOT_LOADED(x) ((x) &= (~BLK_BLOCK_LOADED)); - typedef struct STwaInfo { TSKEY lastKey; int8_t hasResult; // flag to denote has value @@ -264,12 +253,9 @@ typedef struct STwaInfo { /* global sql function array */ extern struct SQLAggFuncElem aAggs[]; -/* compatible check array list */ -extern int32_t funcCompatDefList[]; +extern int32_t functionCompatList[]; // compatible check array list -bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval); - -bool stableQueryFunctChanged(int32_t funcId); +bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval); void resetResultInfo(SResultInfo *pResInfo); void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index ed7a86d843..841e75249f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1928,73 +1928,45 @@ char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWi pQuery->pSelectExpr[columnIndex].bytes * realRowId; } -/** - * decrease the refcount for each table involved in this query - * @param pQInfo - */ -UNUSED_FUNC void vnodeDecMeterRefcnt(SQInfo *pQInfo) { - if (pQInfo != NULL) { - // assert(taosHashGetSize(pQInfo->tableqinfoGroupInfo) >= 1); - } +#define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_DOUBLE && (_t) != TSDB_DATA_TYPE_FLOAT) -#if 0 - if (pQInfo == NULL || pQInfo->tableqinfoGroupInfo.numOfTables == 1) { - atomic_fetch_sub_32(&pQInfo->pObj->numOfQueries, 1); - qDebug("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pQInfo->pObj->vnode, - pQInfo->pObj->sid, pQInfo->pObj->meterId, pQInfo->pObj->numOfQueries); - } else { - int32_t num = 0; - for (int32_t i = 0; i < pQInfo->tableqinfoGroupInfo.numOfTables; ++i) { - SMeterObj *pMeter = getMeterObj(pQInfo->tableqinfoGroupInfo, pQInfo->pSidSet->pTableIdList[i]->sid); - atomic_fetch_sub_32(&(pMeter->numOfQueries), 1); - - if (pMeter->numOfQueries > 0) { - qDebug("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pMeter->vnode, pMeter->sid, - pMeter->meterId, pMeter->numOfQueries); - num++; - } - } - - /* - * in order to reduce log output, for all meters of which numOfQueries count are 0, - * we do not output corresponding information - */ - num = pQInfo->tableqinfoGroupInfo.numOfTables - num; - qDebug("QInfo:%p metric query is over, dec query ref for %d meters, numOfQueries on %d meters are 0", pQInfo, - pQInfo->tableqinfoGroupInfo.numOfTables, num); - } -#endif -} - -static bool needToLoadDataBlock(SQuery *pQuery, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx, - int32_t numOfTotalPoints) { - if (pDataStatis == NULL) { +static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx, + int32_t numOfRows) { + SQuery* pQuery = pRuntimeEnv->pQuery; + if (pDataStatis == NULL 
|| (pQuery->numOfFilterCols == 0 && (!pRuntimeEnv->topBotQuery))) { return true; } -#if 0 for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; - int32_t colIndex = pFilterInfo->info.colIndex; - // this column not valid in current data block - if (colIndex < 0 || pDataStatis[colIndex].colId != pFilterInfo->info.data.colId) { + int32_t index = -1; + for(int32_t i = 0; i < pQuery->numOfCols; ++i) { + if (pDataStatis[i].colId == pFilterInfo->info.colId) { + index = i; + break; + } + } + + if (index == -1) { continue; } // not support pre-filter operation on binary/nchar data type - if (!vnodeSupportPrefilter(pFilterInfo->info.data.type)) { + if (!IS_PREFILTER_TYPE(pFilterInfo->info.type)) { continue; } // all points in current column are NULL, no need to check its boundary value - if (pDataStatis[colIndex].numOfNull == numOfTotalPoints) { + if (pDataStatis[index].numOfNull == numOfRows) { continue; } - if (pFilterInfo->info.info.type == TSDB_DATA_TYPE_FLOAT) { - float minval = *(double *)(&pDataStatis[colIndex].min); - float maxval = *(double *)(&pDataStatis[colIndex].max); + SDataStatis* pDataBlockst = &pDataStatis[index]; + + if (pFilterInfo->info.type == TSDB_DATA_TYPE_FLOAT) { + float minval = *(double *)(&pDataBlockst->min); + float maxval = *(double *)(&pDataBlockst->max); for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) { if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&minval, (char *)&maxval)) { @@ -2003,53 +1975,50 @@ static bool needToLoadDataBlock(SQuery *pQuery, SDataStatis *pDataStatis, SQLFun } } else { for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) { - if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&pDataStatis[colIndex].min, - (char *)&pDataStatis[colIndex].max)) { + if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&pDataBlockst->min, (char *)&pDataBlockst->max)) { return true; } } } } - // todo disable this opt code block temporarily - // for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - // int32_t functId = pQuery->pSelectExpr[i].base.functionId; - // if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM) { - // return top_bot_datablock_filter(&pCtx[i], functId, (char *)&pField[i].min, (char *)&pField[i].max); - // } - // } + if (pRuntimeEnv->topBotQuery) { + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { + return topbot_datablock_filter(&pCtx[i], functionId, (char *)&pDataStatis[i].min, (char *)&pDataStatis[i].max); + } + } + } -#endif - return true; + return false; } -SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis) { +int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock) { SQuery *pQuery = pRuntimeEnv->pQuery; - uint32_t r = 0; - SArray * pDataBlock = NULL; - + uint32_t status = 0; if (pQuery->numOfFilterCols > 0) { - r = BLK_DATA_ALL_NEEDED; - } else { - // check if this data block is required to load + status = BLK_DATA_ALL_NEEDED; + } else { // check if this data block is required to load for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base; int32_t functionId = pSqlFunc->functionId; int32_t colId = pSqlFunc->colInfo.colId; - r |= 
aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pQuery->window.skey, pQuery->window.ekey, colId); + status |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId); } if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) { - r |= BLK_DATA_ALL_NEEDED; + status |= BLK_DATA_ALL_NEEDED; } } - if (r == BLK_DATA_NO_NEEDED) { - qDebug("QInfo:%p data block discard, rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->rows); + if (status == BLK_DATA_NO_NEEDED) { + qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), + pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pRuntimeEnv->summary.discardBlocks += 1; - } else if (r == BLK_DATA_STATIS_NEEDED) { + } else if (status == BLK_DATA_STATIS_NEEDED) { if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { // return DISK_DATA_LOAD_FAILED; } @@ -2057,32 +2026,34 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, pRuntimeEnv->summary.loadBlockStatis += 1; if (*pStatis == NULL) { // data block statistics does not exist, load data block - pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); + *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; } } else { - assert(r == BLK_DATA_ALL_NEEDED); + assert(status == BLK_DATA_ALL_NEEDED); // load the data block statistics to perform further filter - pRuntimeEnv->summary.loadBlockStatis +=1; + pRuntimeEnv->summary.loadBlockStatis += 1; if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { } - if (!needToLoadDataBlock(pQuery,*pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) { + if (!needToLoadDataBlock(pRuntimeEnv, *pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) { #if defined(_DEBUG_VIEW) qDebug("QInfo:%p block discarded by per-filter", GET_QINFO_ADDR(pRuntimeEnv)); #endif // current block has been discard due to filter applied pRuntimeEnv->summary.discardBlocks += 1; - // return DISK_DATA_DISCARDED; + qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), + pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); + return BLK_DATA_DISCARD; } pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; pRuntimeEnv->summary.loadBlocks += 1; - pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); + *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); } - return pDataBlock; + return TSDB_CODE_SUCCESS; } int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) { @@ -2225,6 +2196,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->order.order); TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { @@ -2259,7 +2231,11 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { ensureOutputBuffer(pRuntimeEnv, &blockInfo); SDataStatis *pStatis = NULL; - SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); + SArray *pDataBlock = NULL; + if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) { + pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? 
blockInfo.window.ekey + step:blockInfo.window.skey + step; + continue; + } // query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1; @@ -2282,8 +2258,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { -// int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; - closeAllTimeWindow(&pRuntimeEnv->windowResInfo); // removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window @@ -3700,7 +3674,7 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { } } -void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis, +static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis, SArray *pDataBlock, __block_search_fn_t searchFn) { SQuery * pQuery = pRuntimeEnv->pQuery; STableQueryInfo* pTableQueryInfo = pQuery->current; @@ -3859,9 +3833,10 @@ static void queryCostStatis(SQInfo *pQInfo) { // pQInfo, pSummary->readDiskBlocks, pSummary->totalBlockSize, pSummary->loadBlocksUs / 1000.0, // pSummary->skippedFileBlocks, pSummary->totalGenData); - qDebug("QInfo:%p :cost summary: elpased time:%"PRId64" us, total blocks:%d, use block statis:%d, use block data:%d, " - "total rows:%"PRId64 ", check rows:%"PRId64, pQInfo, pSummary->elapsedTime, pSummary->totalBlocks, - pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); + qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, io time:%"PRId64" us, total blocks:%d, load block statis:%d," + " load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, + pQInfo, pSummary->elapsedTime, pSummary->ioTime, pSummary->totalBlocks, pSummary->loadBlockStatis, + pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); // qDebug("QInfo:%p cost: temp file:%d Bytes", pQInfo, pSummary->tmpBufferInDisk); // @@ -4247,10 +4222,11 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (IS_QUERY_KILLED(pQInfo)) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -4263,12 +4239,8 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { assert(*pTableQueryInfo != NULL); SET_CURRENT_QUERY_TABLE_INFO(pRuntimeEnv, *pTableQueryInfo); - SDataStatis *pStatis = NULL; - SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); - if (!pRuntimeEnv->groupbyNormalCol) { if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { - int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 
1:-1; setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step); } else { // interval query TSKEY nextKey = blockInfo.window.skey; @@ -4280,6 +4252,13 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { } } + SDataStatis *pStatis = NULL; + SArray *pDataBlock = NULL; + if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) { + pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step; + continue; + } + summary->totalRows += blockInfo.rows; stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey); From 33c8ddd49c56763588c67dfd25c547d8578cd3e7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 13 Jul 2020 14:28:16 +0800 Subject: [PATCH 05/14] add --random-file-fail-output argument to support redirect debug info to a file. --- src/dnode/src/dnodeSystem.c | 8 ++++++- src/util/inc/tfile.h | 13 +++++----- src/util/src/tfile.c | 47 ++++++++++++++++++++++++++++++++++--- 3 files changed, 58 insertions(+), 10 deletions(-) diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 971bd0a110..4eba81e29d 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -70,7 +70,13 @@ int32_t main(int32_t argc, char *argv[]) { } #endif #ifdef TAOS_RANDOM_FILE_FAIL - else if (strcmp(argv[i], "--random-file-fail-factor") == 0) { + else if (strcmp(argv[i], "--random-file-fail-output") == 0) { + if ((i < argc - 1) && (argv[i + 1][0] != '-')) { + taosSetRandomFileFailOutput(argv[++i]); + } else { + taosSetRandomFileFailOutput(NULL); + } + } else if (strcmp(argv[i], "--random-file-fail-factor") == 0) { if ( (i+1) < argc ) { int factor = atoi(argv[i+1]); printf("The factor of random failure is %d\n", factor); diff --git a/src/util/inc/tfile.h b/src/util/inc/tfile.h index 04e500743c..de52a40967 100644 --- a/src/util/inc/tfile.h +++ b/src/util/inc/tfile.h @@ -19,13 +19,14 @@ #ifdef TAOS_RANDOM_FILE_FAIL void taosSetRandomFileFailFactor(int factor); -ssize_t taos_tread(int fd, void *buf, size_t count); -ssize_t taos_twrite(int fd, void *buf, size_t count); -off_t taos_lseek(int fd, off_t offset, int whence); +void taosSetRandomFileFailOutput(const char *path); +ssize_t taos_tread(int fd, void *buf, size_t count, const char *file, uint32_t line); +ssize_t taos_twrite(int fd, void *buf, size_t count, const char *file, uint32_t line); +off_t taos_lseek(int fd, off_t offset, int whence, const char *file, uint32_t line); -#define tread(fd, buf, count) taos_tread(fd, buf, count) -#define twrite(fd, buf, count) taos_twrite(fd, buf, count) -#define lseek(fd, offset, whence) taos_lseek(fd, offset, whence) +#define tread(fd, buf, count) taos_tread(fd, buf, count, __FILE__, __LINE__) +#define twrite(fd, buf, count) taos_twrite(fd, buf, count, __FILE__, __LINE__) +#define lseek(fd, offset, whence) taos_lseek(fd, offset, whence, __FILE__, __LINE__) #endif // TAOS_RANDOM_FILE_FAIL diff --git a/src/util/src/tfile.c b/src/util/src/tfile.c index 92eeaef126..d1a9c24615 100644 --- a/src/util/src/tfile.c +++ b/src/util/src/tfile.c @@ -29,14 +29,55 @@ #ifdef TAOS_RANDOM_FILE_FAIL static int random_file_fail_factor = 20; +static FILE *fpRandomFileFailOutput = NULL; void taosSetRandomFileFailFactor(int factor) { random_file_fail_factor = factor; } + +static void close_random_file_fail_output() +{ + if (fpRandomFileFailOutput != NULL) { + if (fpRandomFileFailOutput != stdout) { + fclose(fpRandomFileFailOutput); + } + 
fpRandomFileFailOutput = NULL; + } +} + +static void random_file_fail_output_sig(int sig) +{ + fprintf(fpRandomFileFailOutput, "signal %d received.\n", sig); + + struct sigaction act = {0}; + act.sa_handler = SIG_DFL; + sigaction(sig, &act, NULL); + + close_random_file_fail_output(); + exit(EXIT_FAILURE); +} + +void taosSetRandomFileFailOutput(const char *path) +{ + if (path == NULL) { + fpRandomFileFailOutput = stdout; + } else if ((fpRandomFileFailOutput = fopen(path, "w")) != NULL) { + atexit(close_random_file_fail_output); + } else { + printf("failed to open random file fail log file '%s', errno=%d\n", path, errno); + return; + } + + struct sigaction act = {0}; + act.sa_handler = random_file_fail_output_sig; + sigaction(SIGFPE, &act, NULL); + sigaction(SIGSEGV, &act, NULL); + sigaction(SIGILL, &act, NULL); +} #endif -ssize_t taos_tread(int fd, void *buf, size_t count) +ssize_t taos_tread(int fd, void *buf, size_t count, const char *file, uint32_t line) { #ifdef TAOS_RANDOM_FILE_FAIL if (random_file_fail_factor > 0) { @@ -49,7 +90,7 @@ ssize_t taos_tread(int fd, void *buf, size_t count) return tread(fd, buf, count); } -ssize_t taos_twrite(int fd, void *buf, size_t count) +ssize_t taos_twrite(int fd, void *buf, size_t count, const char *file, uint32_t line) { #ifdef TAOS_RANDOM_FILE_FAIL if (random_file_fail_factor > 0) { @@ -62,7 +103,7 @@ ssize_t taos_twrite(int fd, void *buf, size_t count) return twrite(fd, buf, count); } -off_t taos_lseek(int fd, off_t offset, int whence) +off_t taos_lseek(int fd, off_t offset, int whence, const char *file, uint32_t line) { #ifdef TAOS_RANDOM_FILE_FAIL if (random_file_fail_factor > 0) { From d9bd4f451805fbf11d69cad3d97461a93812e3b6 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 13 Jul 2020 14:36:23 +0800 Subject: [PATCH 06/14] [add cluster sim cases] --- tests/script/unique/cluster/client3.sim | 55 +++ tests/script/unique/cluster/client4.sim | 113 ++++++ tests/script/unique/cluster/client5.sim | 113 ++++++ tests/script/unique/cluster/cluster_main.sim | 45 +-- tests/script/unique/cluster/cluster_main1.sim | 326 +++++++++++++++++ tests/script/unique/cluster/cluster_main2.sim | 330 ++++++++++++++++++ .../script/unique/cluster/main1_client1_0.sim | 94 +++++ .../script/unique/cluster/main1_client1_1.sim | 82 +++++ .../script/unique/cluster/main1_client1_2.sim | 81 +++++ .../script/unique/cluster/main1_client1_3.sim | 81 +++++ 10 files changed, 1299 insertions(+), 21 deletions(-) create mode 100644 tests/script/unique/cluster/client3.sim create mode 100644 tests/script/unique/cluster/client4.sim create mode 100644 tests/script/unique/cluster/client5.sim create mode 100644 tests/script/unique/cluster/cluster_main1.sim create mode 100644 tests/script/unique/cluster/cluster_main2.sim create mode 100644 tests/script/unique/cluster/main1_client1_0.sim create mode 100644 tests/script/unique/cluster/main1_client1_1.sim create mode 100644 tests/script/unique/cluster/main1_client1_2.sim create mode 100644 tests/script/unique/cluster/main1_client1_3.sim diff --git a/tests/script/unique/cluster/client3.sim b/tests/script/unique/cluster/client3.sim new file mode 100644 index 0000000000..4b3024f881 --- /dev/null +++ b/tests/script/unique/cluster/client3.sim @@ -0,0 +1,55 @@ +$tblStart = 0 +$tblEnd = 10000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +loop_lable: +print ====================== client3 start loop: dynamic create table and 
insert data, select, drop table ............ + +$totalRows = 0 + +#sql create table $stb ( ts timestamp, c1 int) tags ( t1 int, t2 binary(16) ) +$tagBinary = ' . client3 +$tagBinary = $tagBinary . ' + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = dtb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb using $stb tags ( $i , $tagBinary ) values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x +# print ====== app insert totalRows: $totalRows + $i = $i + 1 +endw + +sql select count(*) from $stb where t2 == $tagBinary +if $data00 != $totalRows then + print data00 $data00 totalRows $totalRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client3 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +endi + +print ====================== client3 drop table +$i = $tblStart +while $i < $tblEnd + $tb = dtb . $i + sql drop table if exists $tb + $i = $i + 1 +endw +goto loop_lable \ No newline at end of file diff --git a/tests/script/unique/cluster/client4.sim b/tests/script/unique/cluster/client4.sim new file mode 100644 index 0000000000..a4c347695e --- /dev/null +++ b/tests/script/unique/cluster/client4.sim @@ -0,0 +1,113 @@ +$tblStart = 0 +$tblEnd = 2000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +loop_lable: +print ====================== client4 start loop: create db2, tables and insert data, select, drop db2 ............ + +$db = db2 +$stb = stb2 + +print create database if not exists $db replica 2 +sql create database if not exists $db replica 2 +sql use $db + +print ==== client4start create table +$i = $tblStart +while $i < $tblEnd + $tb = dtb . $i + sql create table $tb (ts timestamp, c1 int) + $i = $i + 1 +endw + + +print ==== client4start insert, include multi table data in one insert sql +$totalRows = 0 +$totalRowsPerTbl = 0 + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb0 = dtb . $i + $i = $i + 1 + $tb1 = dtb . $i + $i = $i + 1 + $tb2 = dtb . $i + $i = $i + 1 + $tb3 = dtb . $i + $i = $i + 1 + $tb4 = dtb . 
$i + $i = $i + 1 + + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb0 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb1 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb2 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb3 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb4 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRowsPerTbl = $totalRowsPerTbl + $x + $x = $x * 5 + $totalRows = $totalRows + $x +endw + +sql select count(*) from tb10 +if $data00 != $totalRowsPerTbl then + print data00 $data00 totalRowsPerTbl $totalRowsPerTbl + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client4 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +endi + + +print ====client4 start alter table +$i = $tblStart +while $i < $tblEnd + $tb = dtb . $i + sql alter table $tb add c2 float + $i = $i + 1 +endw + +print ====client4 continue insert, include multi table data in one insert sql + +$i = $tblStart +while $i < $tblEnd + $tb0 = dtb . $i + $i = $i + 1 + $tb1 = dtb . $i + $i = $i + 1 + $tb2 = dtb . $i + $i = $i + 1 + $tb3 = dtb . $i + $i = $i + 1 + $tb4 = dtb . 
$i + $i = $i + 1 + + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb0 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb1 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb2 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb3 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb4 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRowsPerTbl = $totalRowsPerTbl + $x + $x = $x * 5 + $totalRows = $totalRows + $x +endw + +sql select count(*) from tb10 +if $data00 != $totalRowsPerTbl then + print data00 $data00 totalRowsPerTbl $totalRowsPerTbl + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client4 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +endi + +print ====================== client4 drop database +sql drop if exists database $db +goto loop_lable \ No newline at end of file diff --git a/tests/script/unique/cluster/client5.sim b/tests/script/unique/cluster/client5.sim new file mode 100644 index 0000000000..5960b28fdd --- /dev/null +++ b/tests/script/unique/cluster/client5.sim @@ -0,0 +1,113 @@ +$tblStart = 0 +$tblEnd = 2000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +loop_lable: +print ====================== client5 start loop query + +$db = db2 +$stb = stb2 + +print create database if not exists $db replica 2 +sql create database if not exists $db replica 2 +sql use $db + +print ==== client4start create table +$i = $tblStart +while $i < $tblEnd + $tb = dtb . $i + sql create table $tb (ts timestamp, c1 int) + $i = $i + 1 +endw + + +print ==== client4start insert, include multi table data in one insert sql +$totalRows = 0 +$totalRowsPerTbl = 0 + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb0 = dtb . 
$i + $i = $i + 1 + $tb1 = dtb . $i + $i = $i + 1 + $tb2 = dtb . $i + $i = $i + 1 + $tb3 = dtb . $i + $i = $i + 1 + $tb4 = dtb . $i + $i = $i + 1 + + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb0 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb1 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb2 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb3 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb4 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRowsPerTbl = $totalRowsPerTbl + $x + $x = $x * 5 + $totalRows = $totalRows + $x +endw + +sql select count(*) from tb10 +if $data00 != $totalRowsPerTbl then + print data00 $data00 totalRowsPerTbl $totalRowsPerTbl + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client4 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +endi + + +print ====client4 start alter table +$i = $tblStart +while $i < $tblEnd + $tb = dtb . $i + sql alter table $tb add c2 float + $i = $i + 1 +endw + +print ====client4 continue insert, include multi table data in one insert sql + +$i = $tblStart +while $i < $tblEnd + $tb0 = dtb . $i + $i = $i + 1 + $tb1 = dtb . $i + $i = $i + 1 + $tb2 = dtb . $i + $i = $i + 1 + $tb3 = dtb . $i + $i = $i + 1 + $tb4 = dtb . 
$i + $i = $i + 1 + + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb0 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb1 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb2 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb3 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) $tb4 values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRowsPerTbl = $totalRowsPerTbl + $x + $x = $x * 5 + $totalRows = $totalRows + $x +endw + +sql select count(*) from tb10 +if $data00 != $totalRowsPerTbl then + print data00 $data00 totalRowsPerTbl $totalRowsPerTbl + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client4 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +endi + +print ====================== client4 drop database +sql drop if exists database $db +goto loop_lable \ No newline at end of file diff --git a/tests/script/unique/cluster/cluster_main.sim b/tests/script/unique/cluster/cluster_main.sim index 236f1aa59a..f3cce9fd45 100644 --- a/tests/script/unique/cluster/cluster_main.sim +++ b/tests/script/unique/cluster/cluster_main.sim @@ -69,13 +69,14 @@ sql use $db print ============== step3: create stable stb1 $stb = stb1 -sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +print create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) -print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5 +print ============== step4: start others client run_back unique/cluster/client1_0.sim -#run_back unique/cluster/client1_1.sim -#run_back unique/cluster/client1_2.sim -#run_back unique/cluster/client1_3.sim +run_back unique/cluster/client1_1.sim +run_back unique/cluster/client1_2.sim +run_back 
unique/cluster/client1_3.sim #run_back unique/cluster/client2_0.sim #run_back unique/cluster/client2_1.sim #run_back unique/cluster/client2_2.sim @@ -118,14 +119,16 @@ sql create dnode $hostname5 sleep 5000 -print ============== step6: stop and drop dnode1, then remove data dir of dnode1 +print ============== step6: stop dnode1 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 -sql drop dnode $hostname1 -sleep 5000 - -system rm -rf ../../../sim/dnode1/data -sleep 20000 +sleep 10000 +#sql drop dnode $hostname1 +#sleep 5000 +#system rm -rf ../../../sim/dnode1/data +#sleep 20000 +print ============== step6-1: restart dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 10000 sql show mnodes print show mnodes @@ -139,7 +142,6 @@ print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 -return -1 print ============== step7: stop dnode2 system sh/exec.sh -n dnode2 -s stop -x SIGINT @@ -239,11 +241,12 @@ endi print ============== step14: stop and drop dnode4/dnode5, then remove data dir of dnode4/dnode5 system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode5 -s stop -x SIGINT -sleep 20000 +sleep 3000 sql drop dnode $hostname4 sql drop dnode $hostname5 system rm -rf ../../../sim/dnode4/data system rm -rf ../../../sim/dnode5/data +sleep 20000 print ============== step15: alter replica 1 sql alter database $db replica 1 @@ -263,13 +266,13 @@ if $data04 != 2 then return -1 endi -print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready -system sh/cfg.sh -n dnode1 -c first -v $hostname2 -system sh/cfg.sh -n dnode1 -c second -v $hostname3 - -system sh/exec.sh -n dnode1 -s start -sql create dnode $hostname1 -sleep 20000 +#print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +#system sh/cfg.sh -n dnode1 -c first -v $hostname2 +#system sh/cfg.sh -n dnode1 -c second -v $hostname3 +# +#system sh/exec.sh -n dnode1 -s start +#sql create dnode $hostname1 +#sleep 20000 print ============== step18: alter replica 3 sql alter database $db replica 3 diff --git a/tests/script/unique/cluster/cluster_main1.sim b/tests/script/unique/cluster/cluster_main1.sim new file mode 100644 index 0000000000..343924dfee --- /dev/null +++ b/tests/script/unique/cluster/cluster_main1.sim @@ -0,0 +1,326 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/deploy.sh -n dnode5 -i 5 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode2 -c walLevel -v 1 +system sh/cfg.sh -n dnode3 -c walLevel -v 1 +system sh/cfg.sh -n dnode4 -c walLevel -v 1 +system sh/cfg.sh -n dnode5 -c walLevel -v 1 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode5 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 256 +system 
sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 256 + +system sh/cfg.sh -n dnode1 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode2 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode3 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode4 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode5 -c alternativeRole -v 0 + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 5000 + +system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator + +print ============== step0: start tarbitrator +system sh/exec_tarbitrator.sh -s start + +print ============== step1: start dnode1/dnode2/dnode3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sleep 3000 +sql connect +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +print ============== step2: create db1 with replica 3 +$replica = 3 +$db = db1 +print create database $db replica $replica +#sql create database $db replica 3 maxTables $totalTableNum +sql create database $db replica $replica +sql use $db + +print ============== step3: create stable stb1 +$stb = stb1 +print create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) + +print ============== step4: start others client +run_back unique/cluster/main1_client1_0.sim +run_back unique/cluster/main1_client1_1.sim +run_back unique/cluster/main1_client1_2.sim +run_back unique/cluster/main1_client1_3.sim +run_back unique/cluster/client3.sim +run_back unique/cluster/client4.sim + +sleep 20000 + +wait_subsim_insert_complete_create_tables: +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_complete_create_tables +endi + +wait_subsim_insert_data: +print select count(*) from $stb +sql select count(*) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_data +endi + +print wait for a while to let clients start insert data +sleep 5000 + +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start +sql create dnode $hostname4 +sql create dnode $hostname5 +sleep 5000 + +print ============== step6: stop dnode1 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 10000 +#sql drop dnode $hostname1 +#sleep 5000 +#system rm -rf ../../../sim/dnode1/data +#sleep 20000 +print ============== step6-1: restart dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 10000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 
$data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step7: stop dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep 5000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step8: restart dnode2, then wait sync end +system sh/exec.sh -n dnode2 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step9: stop dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step10: restart dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step11: stop dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s stop -x SIGINT +sleep 20000 + +print ============== step12: restart dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step13: alter replica 2 +sql 
alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 2 then + print rplica is not modify to 2, error!!!!!! + return -1 +endi + +print ============== step14: drop dnode4, then remove data dir +sql drop dnode $hostname4 +sleep 20000 +system rm -rf ../../../sim/dnode4/data + +print ============== step14-1: drop dnode5, then remove data dir +sql drop dnode $hostname5 +sleep 20000 +system rm -rf ../../../sim/dnode5/data + +print ============== step15: alter replica 1 +sql alter database $db replica 1 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 1 then + print rplica is not modify to 1, error!!!!!! + return -1 +endi + +print ============== step16: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 2 then + print rplica is not modify to 2, error!!!!!! + return -1 +endi + +#print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +#system sh/cfg.sh -n dnode1 -c first -v $hostname2 +#system sh/cfg.sh -n dnode1 -c second -v $hostname3 +# +#system sh/exec.sh -n dnode1 -s start +#sql create dnode $hostname1 +#sleep 20000 + +print ============== step18: alter replica 3 +sql alter database $db replica 3 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 3 then + print rplica is not modify to 3, error!!!!!! + return -1 +endi + +print **** **** **** (loop_cnt: $loop_cnt ) end, continue...... 
**** **** **** **** +$loop_cnt = $loop_cnt + 1 +goto loop_cluster_do diff --git a/tests/script/unique/cluster/cluster_main2.sim b/tests/script/unique/cluster/cluster_main2.sim new file mode 100644 index 0000000000..83934da457 --- /dev/null +++ b/tests/script/unique/cluster/cluster_main2.sim @@ -0,0 +1,330 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/deploy.sh -n dnode5 -i 5 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode2 -c walLevel -v 1 +system sh/cfg.sh -n dnode3 -c walLevel -v 1 +system sh/cfg.sh -n dnode4 -c walLevel -v 1 +system sh/cfg.sh -n dnode5 -c walLevel -v 1 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode5 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 256 + +system sh/cfg.sh -n dnode1 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode2 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode3 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode4 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode5 -c alternativeRole -v 0 + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 5000 + +system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator + +print ============== step0: start tarbitrator +system sh/exec_tarbitrator.sh -s start + +print ============== step1: start dnode1/dnode2/dnode3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sleep 3000 +sql connect +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +print ============== step2: create db1 with replica 3 +$replica = 3 +$db = db1 +print create database $db replica $replica +#sql create database $db replica 3 maxTables $totalTableNum +sql create database $db replica $replica +sql use $db + +print ============== step3: create stable stb1 +$stb = stb1 +print create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) + +print ============== step4: start others client +run_back unique/cluster/main2_client1_0.sim +run_back unique/cluster/main2_client1_1.sim +run_back unique/cluster/main2_client1_2.sim +run_back unique/cluster/main2_client1_3.sim +run_back unique/cluster/main2_client2_0.sim +run_back unique/cluster/main2_client2_1.sim +run_back unique/cluster/main2_client2_2.sim +run_back 
unique/cluster/main2_client2_3.sim +run_back unique/cluster/client3.sim +run_back unique/cluster/client4.sim + +sleep 20000 + +wait_subsim_insert_complete_create_tables: +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_complete_create_tables +endi + +wait_subsim_insert_data: +print select count(*) from $stb +sql select count(*) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_data +endi + +print wait for a while to let clients start insert data +sleep 5000 + +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start +sql create dnode $hostname4 +sql create dnode $hostname5 +sleep 5000 + +print ============== step6: stop dnode1 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 10000 +#sql drop dnode $hostname1 +#sleep 5000 +#system rm -rf ../../../sim/dnode1/data +#sleep 20000 +print ============== step6-1: restart dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 10000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step7: stop dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep 5000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step8: restart dnode2, then wait sync end +system sh/exec.sh -n dnode2 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step9: stop dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 
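+# The "wait sync end" steps in this script rely on a fixed "sleep 20000", which assumes twenty
+# seconds is always enough for a restarted dnode to catch up. A commented-out sketch of a polling
+# wait, reusing the sleep/goto pattern of the wait_subsim_* loops above, would be as follows
+# (the expected mnode row count of 3 is an assumption, not a value from this script):
+#wait_mnodes_ready:
+#sql show mnodes
+#if $rows < 3 then
+# sleep 3000
+# goto wait_mnodes_ready
+#endi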
+print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step10: restart dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step11: stop dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s stop -x SIGINT +sleep 20000 + +print ============== step12: restart dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step13: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 2 then + print rplica is not modify to 2, error!!!!!! + return -1 +endi + +print ============== step14: drop dnode4, then remove data dir +sql drop dnode $hostname4 +sleep 20000 +system rm -rf ../../../sim/dnode4/data + +print ============== step14-1: drop dnode5, then remove data dir +sql drop dnode $hostname5 +sleep 20000 +system rm -rf ../../../sim/dnode5/data + +print ============== step15: alter replica 1 +sql alter database $db replica 1 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 1 then + print rplica is not modify to 1, error!!!!!! + return -1 +endi + +print ============== step16: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 2 then + print rplica is not modify to 2, error!!!!!! 
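+# The replica value is read back from "show databases" right after the alter statement, so if the
+# change is applied asynchronously this check can report a false error. A commented-out retry
+# sketch, mirroring the existing wait loops, would be as follows (using $data04 assumes db1 is the
+# first row, as in cluster_main0.sim):
+#recheck_replica:
+#sql show databases
+#if $data04 != 2 then
+# sleep 3000
+# goto recheck_replica
+#endi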
+ return -1 +endi + +#print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +#system sh/cfg.sh -n dnode1 -c first -v $hostname2 +#system sh/cfg.sh -n dnode1 -c second -v $hostname3 +# +#system sh/exec.sh -n dnode1 -s start +#sql create dnode $hostname1 +#sleep 20000 + +print ============== step18: alter replica 3 +sql alter database $db replica 3 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19 + +if $data00 == db1 then + $replica = $data04 +elif $data10 == db1 then + $replica = $data14 +else then + print ==== db1 already not exists!!!!! + return -1 +endi + +if $replica != 3 then + print rplica is not modify to 3, error!!!!!! + return -1 +endi + +print **** **** **** (loop_cnt: $loop_cnt ) end, continue...... **** **** **** **** +$loop_cnt = $loop_cnt + 1 +goto loop_cluster_do diff --git a/tests/script/unique/cluster/main1_client1_0.sim b/tests/script/unique/cluster/main1_client1_0.sim new file mode 100644 index 0000000000..d4f2aa4294 --- /dev/null +++ b/tests/script/unique/cluster/main1_client1_0.sim @@ -0,0 +1,94 @@ +#system sh/stop_dnodes.sh +#system sh/deploy.sh -n dnode1 -i 1 +#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 10000 +#system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +#system sh/exec.sh -n dnode1 -s start +#sql connect +#$db = db1 +#sql create database $db +#sql use $db +#$stb = stb1 +#sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) + + +$tblStart = 0 +$tblEnd = 1000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_0 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_0 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) -x insert_error_loop + $x = $x + 20 + $ts = $ts + 40a + goto continue_next_0 + insert_error_loop: + print ============== main1_client1_0 run error: sql insert into $tb values ( $ts + 0a , $x ) ... ... 
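+# On an insert failure the "-x insert_error_loop" suffix above jumps to this error path (for
+# example while cluster_main1.sim is restarting dnodes); the failure is only printed and execution
+# falls through to continue_next_0, so this background client keeps inserting instead of aborting.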
+ continue_next_0: + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + if $data00 != $totalRows then + print data00 $data00 totalRows $totalRows + $deltaRows = $totalRows - $data00 + if $lastLossRows == 0 then + $lastLossRows = $deltaRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_0 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + elif $deltaRows != $lastLossRows + $tmp = $deltaRows - $lastLossRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_0 insert loss: $tmp *********** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + $lastLossRows = $deltaRows + endi +# return -1 + endi + goto continue_next_1 + query_error_loop: + print ============== main1_client1_0 run error: sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + continue_next_1: + print ====================== client1_0 insert data complete once ............ + endi +endw +print ====================== client1_0 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main1_client1_1.sim b/tests/script/unique/cluster/main1_client1_1.sim new file mode 100644 index 0000000000..b50f533ce1 --- /dev/null +++ b/tests/script/unique/cluster/main1_client1_1.sim @@ -0,0 +1,82 @@ +$tblStart = 1000 +$tblEnd = 2000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_1 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_1 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) -x insert_error_loop + $x = $x + 20 + $ts = $ts + 40a + goto continue_next_0 + insert_error_loop: + print ============== main1_client1_1 run error: sql insert into $tb values ( $ts + 0a , $x ) ... ... 
+ continue_next_0: + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + if $data00 != $totalRows then + print data00 $data00 totalRows $totalRows + $deltaRows = $totalRows - $data00 + if $lastLossRows == 0 then + $lastLossRows = $deltaRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_1 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + elif $deltaRows != $lastLossRows + $tmp = $deltaRows - $lastLossRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_1 insert loss: $tmp *********** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + $lastLossRows = $deltaRows + endi +# return -1 + endi + goto continue_next_1 + query_error_loop: + print ============== main1_client1_1 run error: sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + continue_next_1: + print ====================== client1_2 insert data complete once ............ + endi +endw +print ====================== client1_1 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main1_client1_2.sim b/tests/script/unique/cluster/main1_client1_2.sim new file mode 100644 index 0000000000..8cc39ded04 --- /dev/null +++ b/tests/script/unique/cluster/main1_client1_2.sim @@ -0,0 +1,81 @@ +$tblStart = 2000 +$tblEnd = 3000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_2 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_2 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) -x insert_error_loop + $x = $x + 20 + $ts = $ts + 40a + goto continue_next_0 + insert_error_loop: + print ============== main1_client1_2 run error: sql insert into $tb values ( $ts + 0a , $x ) ... ... 
+ continue_next_0: + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + if $data00 != $totalRows then + print data00 $data00 totalRows $totalRows + $deltaRows = $totalRows - $data00 + if $lastLossRows == 0 then + $lastLossRows = $deltaRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_2 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + elif $deltaRows != $lastLossRows + $tmp = $deltaRows - $lastLossRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_2 insert loss: $tmp *********** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + $lastLossRows = $deltaRows + endi +# return -1 + endi + goto continue_next_1 + query_error_loop: + print ============== main1_client1_2 run error: sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + continue_next_1: + print ====================== client1_2 insert data complete once ............ + endi +endw +print ====================== client1_2 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main1_client1_3.sim b/tests/script/unique/cluster/main1_client1_3.sim new file mode 100644 index 0000000000..72e1be48ac --- /dev/null +++ b/tests/script/unique/cluster/main1_client1_3.sim @@ -0,0 +1,81 @@ +$tblStart = 3000 +$tblEnd = 4000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_3 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_3 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) -x insert_error_loop + $x = $x + 20 + $ts = $ts + 40a + goto continue_next_0 + insert_error_loop: + print ============== main1_client1_3 run error: sql insert into $tb values ( $ts + 0a , $x ) ... ... 
+ continue_next_0: + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + if $data00 != $totalRows then + print data00 $data00 totalRows $totalRows + $deltaRows = $totalRows - $data00 + if $lastLossRows == 0 then + $lastLossRows = $deltaRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_3 insert loss: $deltaRows ***** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + elif $deltaRows != $lastLossRows + $tmp = $deltaRows - $lastLossRows + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + print ************ client1_3 insert loss: $tmp *********** + print ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + $lastLossRows = $deltaRows + endi +# return -1 + endi + goto continue_next_1 + query_error_loop: + print ============== main1_client1_3 run error: sql select count(*) from $stb where t2 == $tagBinary -x query_error_loop + continue_next_1: + print ====================== client1_3 insert data complete once ............ + endi +endw +print ====================== client1_3 success and auto end ===================== \ No newline at end of file From a08c7405c06151cfd0ade3ad64e6f78591453ad2 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 13 Jul 2020 15:07:11 +0800 Subject: [PATCH 07/14] [add cluster sim cases] --- tests/script/unique/cluster/cluster_main0.sim | 288 ++++++++++++++++++ tests/script/unique/cluster/main1_client4.sim | 127 ++++++++ .../script/unique/cluster/main2_client1_0.sim | 69 +++++ .../script/unique/cluster/main2_client1_1.sim | 57 ++++ .../script/unique/cluster/main2_client1_2.sim | 56 ++++ .../script/unique/cluster/main2_client1_3.sim | 56 ++++ .../script/unique/cluster/main2_client2_0.sim | 56 ++++ .../script/unique/cluster/main2_client2_1.sim | 57 ++++ .../script/unique/cluster/main2_client2_2.sim | 57 ++++ .../script/unique/cluster/main2_client2_3.sim | 57 ++++ 10 files changed, 880 insertions(+) create mode 100644 tests/script/unique/cluster/cluster_main0.sim create mode 100644 tests/script/unique/cluster/main1_client4.sim create mode 100644 tests/script/unique/cluster/main2_client1_0.sim create mode 100644 tests/script/unique/cluster/main2_client1_1.sim create mode 100644 tests/script/unique/cluster/main2_client1_2.sim create mode 100644 tests/script/unique/cluster/main2_client1_3.sim create mode 100644 tests/script/unique/cluster/main2_client2_0.sim create mode 100644 tests/script/unique/cluster/main2_client2_1.sim create mode 100644 tests/script/unique/cluster/main2_client2_2.sim create mode 100644 tests/script/unique/cluster/main2_client2_3.sim diff --git a/tests/script/unique/cluster/cluster_main0.sim b/tests/script/unique/cluster/cluster_main0.sim new file mode 100644 index 0000000000..f3cce9fd45 --- /dev/null +++ b/tests/script/unique/cluster/cluster_main0.sim @@ -0,0 +1,288 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/deploy.sh -n dnode5 -i 5 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode2 -c walLevel -v 1 +system sh/cfg.sh -n dnode3 -c walLevel -v 1 +system sh/cfg.sh -n dnode4 
-c walLevel -v 1 +system sh/cfg.sh -n dnode5 -c walLevel -v 1 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode5 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 256 + +system sh/cfg.sh -n dnode1 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode2 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode3 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode4 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode5 -c alternativeRole -v 0 + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 5000 + +system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator + +print ============== step0: start tarbitrator +system sh/exec_tarbitrator.sh -s start + +print ============== step1: start dnode1/dnode2/dnode3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sleep 3000 +sql connect +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +print ============== step2: create db1 with replica 3 +$db = db1 +print create database $db replica 3 +#sql create database $db replica 3 maxTables $totalTableNum +sql create database $db replica 3 +sql use $db + +print ============== step3: create stable stb1 +$stb = stb1 +print create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) + +print ============== step4: start others client +run_back unique/cluster/client1_0.sim +run_back unique/cluster/client1_1.sim +run_back unique/cluster/client1_2.sim +run_back unique/cluster/client1_3.sim +#run_back unique/cluster/client2_0.sim +#run_back unique/cluster/client2_1.sim +#run_back unique/cluster/client2_2.sim +#run_back unique/cluster/client2_3.sim +#run_back unique/cluster/client3.sim +#run_back unique/cluster/client4.sim + +sleep 20000 + +wait_subsim_insert_complete_create_tables: +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_complete_create_tables +endi + +wait_subsim_insert_data: +print select count(*) from $stb +sql select count(*) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_data +endi + +print wait for a while to let clients start insert data +sleep 5000 + +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start +sql create dnode $hostname4 +sql create dnode $hostname5 + +sleep 5000 + + +print 
============== step6: stop dnode1 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 10000 +#sql drop dnode $hostname1 +#sleep 5000 +#system rm -rf ../../../sim/dnode1/data +#sleep 20000 +print ============== step6-1: restart dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 10000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step7: stop dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep 5000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step8: restart dnode2, then wait sync end +system sh/exec.sh -n dnode2 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step9: stop dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step10: restart dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step11: stop dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s stop -x SIGINT +sleep 20000 + +print ============== step12: restart dnode4, then wait sync 
end +system sh/exec.sh -n dnode4 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step13: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 + +if $data04 != 2 then + print rplica is not modify to 2, error!!!!!! + return -1 +endi + +print ============== step14: stop and drop dnode4/dnode5, then remove data dir of dnode4/dnode5 +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT +sleep 3000 +sql drop dnode $hostname4 +sql drop dnode $hostname5 +system rm -rf ../../../sim/dnode4/data +system rm -rf ../../../sim/dnode5/data +sleep 20000 + +print ============== step15: alter replica 1 +sql alter database $db replica 1 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data04 != 1 then + print rplica is not modify to 1, error!!!!!! + return -1 +endi + +print ============== step16: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data04 != 2 then + print rplica is not modify to 2, error!!!!!! + return -1 +endi + +#print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +#system sh/cfg.sh -n dnode1 -c first -v $hostname2 +#system sh/cfg.sh -n dnode1 -c second -v $hostname3 +# +#system sh/exec.sh -n dnode1 -s start +#sql create dnode $hostname1 +#sleep 20000 + +print ============== step18: alter replica 3 +sql alter database $db replica 3 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data04 != 3 then + print rplica is not modify to 3, error!!!!!! + return -1 +endi + +print **** **** **** (loop_cnt: $loop_cnt ) end, continue...... **** **** **** **** +$loop_cnt = $loop_cnt + 1 +goto loop_cluster_do diff --git a/tests/script/unique/cluster/main1_client4.sim b/tests/script/unique/cluster/main1_client4.sim new file mode 100644 index 0000000000..eb8049b04b --- /dev/null +++ b/tests/script/unique/cluster/main1_client4.sim @@ -0,0 +1,127 @@ +$tblStart = 0 +$tblEnd = 10000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db2 +$stb = stb2 + +loop_lable: +print ========= start loop create db, table, inset data, alter column/tag, select, drop db + +sql create database if not exists $db +sql use $db +sql create table if not exists $stb ( ts timestamp, c1 int, c2 float ) tags ( t1 int , t2 binary(16) ) +$tagBinary = ' . client4 +$tagBinary = $tagBinary . ' + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 20a , $x , $x ) ( $ts + 22a , $x , $x ) ( $ts + 24a , $x , $x ) ( $ts + 26a , $x , $x ) ( $ts + 28a , $x , $x ) ( $ts + 30a , $x , $x ) ( $ts + 32a , $x , $x ) ( $ts + 34a , $x , $x ) ( $ts + 36a , $x , $x ) ( $ts + 38a , $x , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 != $totalRows then + print ********************** select error ********************** + endi + continue_loop: + print ====================== client4 insert data complete once ............ + endi +endw + +##################### alter column +sql alter table $stb add column c3 double +sql alter table $stb drop column c2 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 20a , $x , $x ) ( $ts + 22a , $x , $x ) ( $ts + 24a , $x , $x ) ( $ts + 26a , $x , $x ) ( $ts + 28a , $x , $x ) ( $ts + 30a , $x , $x ) ( $ts + 32a , $x , $x ) ( $ts + 34a , $x , $x ) ( $ts + 36a , $x , $x ) ( $ts + 38a , $x , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 != $totalRows then + print ********************** select error ********************** + endi + continue_loop: + print ====================== client4 insert data complete once ............ + endi +endw + + +##################### alter tag +sql alter table $stb add tag t3 int +sql alter table $stb drop tag t2 + +$i = $tblStart +while $i < $tblEnd + $tb = ttb . $i + sql create table if not exists $tb using $stb tags ($i, $i) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 20a , $x , $x ) ( $ts + 22a , $x , $x ) ( $ts + 24a , $x , $x ) ( $ts + 26a , $x , $x ) ( $ts + 28a , $x , $x ) ( $ts + 30a , $x , $x ) ( $ts + 32a , $x , $x ) ( $ts + 34a , $x , $x ) ( $ts + 36a , $x , $x ) ( $ts + 38a , $x , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 != $totalRows then + print ********************** select error ********************** + endi + continue_loop: + print ====================== client4 insert data complete once ............ 
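+# Note on the check above: a row-count mismatch is only printed as "select error" and the script
+# continues (no return -1), and "-x continue_loop" skips the comparison entirely when the select
+# itself fails, so insert loss in this client is logged rather than treated as fatal, unlike the
+# replica checks in the cluster_main* scripts.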
+ endi +endw + +goto loop_lable \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client1_0.sim b/tests/script/unique/cluster/main2_client1_0.sim new file mode 100644 index 0000000000..a7fd181363 --- /dev/null +++ b/tests/script/unique/cluster/main2_client1_0.sim @@ -0,0 +1,69 @@ +#system sh/stop_dnodes.sh +#system sh/deploy.sh -n dnode1 -i 1 +#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 10000 +#system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +#system sh/exec.sh -n dnode1 -s start +#sql connect +#$db = db1 +#sql create database $db +#sql use $db +#$stb = stb1 +#sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) + + +$tblStart = 0 +$tblEnd = 1000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_0 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_0 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client1_0 insert data complete once ............ + endi +endw +print ====================== client1_0 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client1_1.sim b/tests/script/unique/cluster/main2_client1_1.sim new file mode 100644 index 0000000000..f22d62a741 --- /dev/null +++ b/tests/script/unique/cluster/main2_client1_1.sim @@ -0,0 +1,57 @@ +$tblStart = 1000 +$tblEnd = 2000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_1 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_1 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client1_1 insert data complete once ............ + endi +endw +print ====================== client1_1 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client1_2.sim b/tests/script/unique/cluster/main2_client1_2.sim new file mode 100644 index 0000000000..df3a925c59 --- /dev/null +++ b/tests/script/unique/cluster/main2_client1_2.sim @@ -0,0 +1,56 @@ +$tblStart = 2000 +$tblEnd = 3000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_2 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_2 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client1_2 insert data complete once ............ + endi +endw +print ====================== client1_2 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client1_3.sim b/tests/script/unique/cluster/main2_client1_3.sim new file mode 100644 index 0000000000..9c22432fa5 --- /dev/null +++ b/tests/script/unique/cluster/main2_client1_3.sim @@ -0,0 +1,56 @@ +$tblStart = 3000 +$tblEnd = 4000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client1_3 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_3 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client1_3 insert data complete once ............ + endi +endw +print ====================== client1_3 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client2_0.sim b/tests/script/unique/cluster/main2_client2_0.sim new file mode 100644 index 0000000000..e44efc49fc --- /dev/null +++ b/tests/script/unique/cluster/main2_client2_0.sim @@ -0,0 +1,56 @@ +$tblStart = 0 +$tblEnd = 1000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.001 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client2_0 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client2_0 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lastLossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client2_0 insert data complete once ............ 
+ endi +endw +print ====================== client2_0 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client2_1.sim b/tests/script/unique/cluster/main2_client2_1.sim new file mode 100644 index 0000000000..19de147d3e --- /dev/null +++ b/tests/script/unique/cluster/main2_client2_1.sim @@ -0,0 +1,57 @@ +$tblStart = 1000 +$tblEnd = 2000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client2_1 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client2_1 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client2_1 insert data complete once ............ + endi +endw +print ====================== client2_1 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client2_2.sim b/tests/script/unique/cluster/main2_client2_2.sim new file mode 100644 index 0000000000..19e3540bd1 --- /dev/null +++ b/tests/script/unique/cluster/main2_client2_2.sim @@ -0,0 +1,57 @@ +$tblStart = 2000 +$tblEnd = 3000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client2_2 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client2_2 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client2_2 insert data complete once ............ + endi +endw +print ====================== client2_2 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/main2_client2_3.sim b/tests/script/unique/cluster/main2_client2_3.sim new file mode 100644 index 0000000000..219df131c8 --- /dev/null +++ b/tests/script/unique/cluster/main2_client2_3.sim @@ -0,0 +1,57 @@ +$tblStart = 3000 +$tblEnd = 4000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagBinary = ' . client2_3 +$tagBinary = $tagBinary . ' +#print ======= tag: $tagBinary + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client2_3 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$lossRows = 0 + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb where t2 == $tagBinary + print ====================== client2_3 insert data complete once ............ 
+ endi +endw +print ====================== client2_3 success and auto end ===================== \ No newline at end of file From 5539b262f1a73047eb2730a2a550fc178b0ee331 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 13 Jul 2020 15:08:07 +0800 Subject: [PATCH 08/14] [modify] --- tests/script/unique/cluster/cluster_main.sim | 44 +++++++++----------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/tests/script/unique/cluster/cluster_main.sim b/tests/script/unique/cluster/cluster_main.sim index f3cce9fd45..da4f709f46 100644 --- a/tests/script/unique/cluster/cluster_main.sim +++ b/tests/script/unique/cluster/cluster_main.sim @@ -69,14 +69,13 @@ sql use $db print ============== step3: create stable stb1 $stb = stb1 -print create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) -sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(16)) +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) -print ============== step4: start others client +print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5 run_back unique/cluster/client1_0.sim -run_back unique/cluster/client1_1.sim -run_back unique/cluster/client1_2.sim -run_back unique/cluster/client1_3.sim +#run_back unique/cluster/client1_1.sim +#run_back unique/cluster/client1_2.sim +#run_back unique/cluster/client1_3.sim #run_back unique/cluster/client2_0.sim #run_back unique/cluster/client2_1.sim #run_back unique/cluster/client2_2.sim @@ -119,16 +118,14 @@ sql create dnode $hostname5 sleep 5000 -print ============== step6: stop dnode1 +print ============== step6: stop and drop dnode1, then remove data dir of dnode1 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 10000 -#sql drop dnode $hostname1 -#sleep 5000 -#system rm -rf ../../../sim/dnode1/data -#sleep 20000 -print ============== step6-1: restart dnode1 -system sh/exec.sh -n dnode1 -s start -sleep 10000 +sleep 5000 +sql drop dnode $hostname1 +sleep 5000 + +system rm -rf ../../../sim/dnode1/data +sleep 20000 sql show mnodes print show mnodes @@ -241,12 +238,11 @@ endi print ============== step14: stop and drop dnode4/dnode5, then remove data dir of dnode4/dnode5 system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode5 -s stop -x SIGINT -sleep 3000 +sleep 20000 sql drop dnode $hostname4 sql drop dnode $hostname5 system rm -rf ../../../sim/dnode4/data system rm -rf ../../../sim/dnode5/data -sleep 20000 print ============== step15: alter replica 1 sql alter database $db replica 1 @@ -266,13 +262,13 @@ if $data04 != 2 then return -1 endi -#print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready -#system sh/cfg.sh -n dnode1 -c first -v $hostname2 -#system sh/cfg.sh -n dnode1 -c second -v $hostname3 -# -#system sh/exec.sh -n dnode1 -s start -#sql create dnode $hostname1 -#sleep 20000 +print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +system sh/cfg.sh -n dnode1 -c first -v $hostname2 +system sh/cfg.sh -n dnode1 -c second -v $hostname3 + +system sh/exec.sh -n dnode1 -s start +sql create dnode $hostname1 +sleep 20000 print ============== step18: alter replica 3 sql alter database $db replica 3 From e51ea2d305b3fef468e894c3728e974f46710d71 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 13 Jul 2020 15:11:39 +0800 Subject: [PATCH 09/14] [fix bug] --- src/vnode/src/vnodeRead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index ff58e219b0..4c7970076c 
100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -155,7 +155,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { pRetrieve->qhandle = htobe64(pRetrieve->qhandle); pRetrieve->free = htons(pRetrieve->free); - vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *(void**) pRetrieve->qhandle); + vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, (void*) pRetrieve->qhandle); memset(pRet, 0, sizeof(SRspRet)); From a531f81bec4e11380923de4b3e3ddce704adaaff Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 13 Jul 2020 16:12:25 +0800 Subject: [PATCH 10/14] [temp not test drop dnode] --- tests/script/unique/cluster/cluster_main.sim | 77 ++++++++++++------- tests/script/unique/cluster/cluster_main1.sim | 35 +++++---- tests/script/unique/cluster/cluster_main2.sim | 35 +++++---- 3 files changed, 86 insertions(+), 61 deletions(-) diff --git a/tests/script/unique/cluster/cluster_main.sim b/tests/script/unique/cluster/cluster_main.sim index da4f709f46..4c02d416f3 100644 --- a/tests/script/unique/cluster/cluster_main.sim +++ b/tests/script/unique/cluster/cluster_main.sim @@ -71,7 +71,7 @@ print ============== step3: create stable stb1 $stb = stb1 sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) -print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5 +print ============== step4: start back client1_0.sim run_back unique/cluster/client1_0.sim #run_back unique/cluster/client1_1.sim #run_back unique/cluster/client1_2.sim @@ -106,27 +106,43 @@ endi print wait for a while to let clients start insert data sleep 5000 -$loop_cnt = 0 -loop_cluster_do: -print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** -print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode5 -s start +print ============== step4-1: add dnode4/dnode5 into cluster sql create dnode $hostname4 sql create dnode $hostname5 -sleep 5000 - - -print ============== step6: stop and drop dnode1, then remove data dir of dnode1 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 5000 -sql drop dnode $hostname1 -sleep 5000 - -system rm -rf ../../../sim/dnode1/data +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start sleep 20000 +print ============== step6: stop dnode1 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 10000 +#sql drop dnode $hostname1 +#sleep 5000 + +#system rm -rf ../../../sim/dnode1/data +#sleep 20000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step6-1: restart dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 10000 sql show mnodes print show mnodes print rows: $rows @@ -235,14 +251,17 @@ if $data04 != 2 then return -1 endi -print ============== step14: stop and drop dnode4/dnode5, then 
remove data dir of dnode4/dnode5 +print ============== step14: stop dnode4/dnode5 system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode5 -s stop -x SIGINT sleep 20000 -sql drop dnode $hostname4 -sql drop dnode $hostname5 -system rm -rf ../../../sim/dnode4/data -system rm -rf ../../../sim/dnode5/data +#system sh/exec.sh -n dnode4 -s start +#system sh/exec.sh -n dnode5 -s start +#sleep 10000 +#sql drop dnode $hostname4 +#sql drop dnode $hostname5 +#system rm -rf ../../../sim/dnode4/data +#system rm -rf ../../../sim/dnode5/data print ============== step15: alter replica 1 sql alter database $db replica 1 @@ -262,13 +281,13 @@ if $data04 != 2 then return -1 endi -print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready -system sh/cfg.sh -n dnode1 -c first -v $hostname2 -system sh/cfg.sh -n dnode1 -c second -v $hostname3 - -system sh/exec.sh -n dnode1 -s start -sql create dnode $hostname1 -sleep 20000 +#print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +#system sh/cfg.sh -n dnode1 -c first -v $hostname2 +#system sh/cfg.sh -n dnode1 -c second -v $hostname3 +# +#system sh/exec.sh -n dnode1 -s start +#sql create dnode $hostname1 +#sleep 20000 print ============== step18: alter replica 3 sql alter database $db replica 3 diff --git a/tests/script/unique/cluster/cluster_main1.sim b/tests/script/unique/cluster/cluster_main1.sim index 343924dfee..82e1e2be83 100644 --- a/tests/script/unique/cluster/cluster_main1.sim +++ b/tests/script/unique/cluster/cluster_main1.sim @@ -104,16 +104,19 @@ endi print wait for a while to let clients start insert data sleep 5000 -$loop_cnt = 0 -loop_cluster_do: -print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** -print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode5 -s start +print ============== step4-1: add dnode4/dnode5 into cluster sql create dnode $hostname4 sql create dnode $hostname5 sleep 5000 +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start +sleep 20000 + print ============== step6: stop dnode1 system sh/exec.sh -n dnode1 -s stop -x SIGINT sleep 10000 @@ -207,9 +210,9 @@ print ============== step11: stop dnode4, then wait sync end system sh/exec.sh -n dnode4 -s stop -x SIGINT sleep 20000 -print ============== step12: restart dnode4, then wait sync end -system sh/exec.sh -n dnode4 -s start -sleep 20000 +#print ============== step12: restart dnode4, then wait sync end +#system sh/exec.sh -n dnode4 -s start +#sleep 20000 sql show mnodes print show mnodes print rows: $rows @@ -243,15 +246,15 @@ if $replica != 2 then return -1 endi -print ============== step14: drop dnode4, then remove data dir -sql drop dnode $hostname4 +print ============== step14: stop dnode5 +system sh/exec.sh -n dnode5 -s stop -x SIGINT sleep 20000 -system rm -rf ../../../sim/dnode4/data +#system rm -rf ../../../sim/dnode5/data -print ============== step14-1: drop dnode5, then remove data dir -sql drop dnode $hostname5 -sleep 20000 -system rm -rf ../../../sim/dnode5/data +#print ============== step14-1: drop dnode5, then remove data dir +#sql drop dnode $hostname5 +#sleep 20000 +#system rm -rf ../../../sim/dnode5/data print ============== step15: alter replica 1 sql alter database $db 
replica 1 diff --git a/tests/script/unique/cluster/cluster_main2.sim b/tests/script/unique/cluster/cluster_main2.sim index 83934da457..e5fe3f2a21 100644 --- a/tests/script/unique/cluster/cluster_main2.sim +++ b/tests/script/unique/cluster/cluster_main2.sim @@ -108,16 +108,19 @@ endi print wait for a while to let clients start insert data sleep 5000 -$loop_cnt = 0 -loop_cluster_do: -print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** -print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode5 -s start +print ============== step4-1: add dnode4/dnode5 into cluster sql create dnode $hostname4 sql create dnode $hostname5 sleep 5000 + +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start + print ============== step6: stop dnode1 system sh/exec.sh -n dnode1 -s stop -x SIGINT sleep 10000 @@ -211,9 +214,9 @@ print ============== step11: stop dnode4, then wait sync end system sh/exec.sh -n dnode4 -s stop -x SIGINT sleep 20000 -print ============== step12: restart dnode4, then wait sync end -system sh/exec.sh -n dnode4 -s start -sleep 20000 +#print ============== step12: restart dnode4, then wait sync end +#system sh/exec.sh -n dnode4 -s start +#sleep 20000 sql show mnodes print show mnodes print rows: $rows @@ -247,15 +250,15 @@ if $replica != 2 then return -1 endi -print ============== step14: drop dnode4, then remove data dir -sql drop dnode $hostname4 +print ============== step14: drop dnode5, then remove data dir +system sh/exec.sh -n dnode5 -s stop -x SIGINT sleep 20000 -system rm -rf ../../../sim/dnode4/data +#system rm -rf ../../../sim/dnode5/data -print ============== step14-1: drop dnode5, then remove data dir -sql drop dnode $hostname5 -sleep 20000 -system rm -rf ../../../sim/dnode5/data +#print ============== step14-1: drop dnode5, then remove data dir +#sql drop dnode $hostname5 +#sleep 20000 +#system rm -rf ../../../sim/dnode5/data print ============== step15: alter replica 1 sql alter database $db replica 1 From 8c472009b680ebc0bcf0e7f5dc1f5efef791dd4e Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Mon, 13 Jul 2020 16:56:13 +0800 Subject: [PATCH 11/14] fix td-834 fix bug: parser/commit.sim cause taosd crash when execute with 'alloc-random-fail' --- src/query/inc/qUtil.h | 2 +- src/query/src/qExecutor.c | 33 +++++++++++++++++++++++++++------ src/query/src/qUtil.c | 16 ++++++++++++---- 3 files changed, 40 insertions(+), 11 deletions(-) diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 9b818b367f..8fe3c0f495 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -37,7 +37,7 @@ SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot); #define curTimeWindow(_winres) ((_winres)->curIndex) bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot); -void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize); +int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize); char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 1d9068be16..95fc838842 100644 --- 
a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2978,16 +2978,23 @@ void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) { } } -void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize) { +int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize) { int32_t numOfCols = pQuery->numOfOutput; pResultRow->resultInfo = calloc((size_t)numOfCols, sizeof(SResultInfo)); + if (pResultRow->resultInfo == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } pResultRow->pos = *posInfo; char* buf = calloc(1, interBufSize); + if (buf == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } // set the intermediate result output buffer setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery, buf); + return TSDB_CODE_SUCCESS; } void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { @@ -3368,7 +3375,10 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { int32_t initialSize = 16; int32_t initialThreshold = 100; - initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); + int32_t code = initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); + if (code != TSDB_CODE_SUCCESS) { + return NULL; + } } else { // in other aggregate query, do not initialize the windowResInfo } @@ -4189,7 +4199,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_INT; // group id } - initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 32, 4096, type); + code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 32, 4096, type); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { @@ -4206,7 +4219,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_TIMESTAMP; } - initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, rows, 4096, type); + code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, rows, 4096, type); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { @@ -5737,6 +5753,10 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, size_t s = taosArrayGetSize(pa); SArray* p1 = taosArrayInit(s, POINTER_BYTES); + if (p1 == NULL) { + goto _cleanup; + } + taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1); for(int32_t j = 0; j < s; ++j) { void* pTable = taosArrayGetP(pa, j); @@ -5751,13 +5771,14 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo); STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf); + if (item == NULL) { + goto _cleanup; + } item->groupIndex = i; taosArrayPush(p1, &item); taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES); index += 1; } - - taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1); } pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo)); diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index dce2c24ea0..acdc46fcc1 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -50,9 +50,15 @@ 
int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun // use the pointer arraylist pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult)); + if (pWindowResInfo->pResult == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { SPosInfo posInfo = {-1, -1}; - createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize); + int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } return TSDB_CODE_SUCCESS; @@ -76,9 +82,11 @@ void cleanupTimeWindowInfo(SWindowResInfo *pWindowResInfo, int32_t numOfCols) { return; } - for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { - SWindowResult *pResult = &pWindowResInfo->pResult[i]; - destroyTimeWindowRes(pResult, numOfCols); + if (pWindowResInfo->pResult != NULL) { + for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { + SWindowResult *pResult = &pWindowResInfo->pResult[i]; + destroyTimeWindowRes(pResult, numOfCols); + } } taosHashCleanup(pWindowResInfo->hashList); From d496869ee72141e2511d90df47613b10c0f7a375 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Jul 2020 18:04:52 +0800 Subject: [PATCH 12/14] [td-225] opt query perf --- src/client/src/tscAsync.c | 36 ++++++++++--------- src/client/src/tscFunctionImpl.c | 36 +++++++++++++------ src/client/src/tscUtil.c | 9 ++--- src/query/inc/tsqlfunction.h | 3 +- src/query/src/qExecutor.c | 61 +++++++++++++++++--------------- src/tsdb/src/tsdbRead.c | 10 +++++- 6 files changed, 93 insertions(+), 62 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 17998e1981..7371a8a578 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -45,6 +45,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const pSql->pTscObj = pObj; pSql->maxRetry = TSDB_MAX_REPLICA_NUM; pSql->fp = fp; + pSql->fetchFp = fp; pSql->sqlstr = calloc(1, sqlLen + 1); if (pSql->sqlstr == NULL) { @@ -159,7 +160,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo pRes->code = numOfRows; } - tscQueueAsyncError(pSql->fetchFp, param, pRes->code); + tscQueueAsyncRes(pSql); return; } @@ -346,31 +347,32 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { void tscProcessAsyncRes(SSchedMsg *pMsg) { SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; - SSqlCmd *pCmd = &pSql->cmd; +// SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - void *taosres = pSql; +// void *taosres = pSql; // pCmd may be released, so cache pCmd->command - int cmd = pCmd->command; - int code = pRes->code; +// int cmd = pCmd->command; +// int code = pRes->code; // in case of async insert, restore the user specified callback function - bool shouldFree = tscShouldBeFreed(pSql); +// bool shouldFree = tscShouldBeFreed(pSql); - if (cmd == TSDB_SQL_INSERT) { - assert(pSql->fp != NULL); - pSql->fp = pSql->fetchFp; - } +// if (pCmd->command == TSDB_SQL_INSERT) { +// assert(pSql->fp != NULL); + assert(pSql->fp != NULL && pSql->fetchFp != NULL); +// } - if (pSql->fp) { - (*pSql->fp)(pSql->param, taosres, code); - } +// if (pSql->fp) { + pSql->fp = pSql->fetchFp; + (*pSql->fp)(pSql->param, pSql, pRes->code); +// } - if (shouldFree) { - tscDebug("%p sqlObj is automatically freed in async res", pSql); - tscFreeSqlObj(pSql); - } +// if (shouldFree) { +// tscDebug("%p sqlObj 
is automatically freed in async res", pSql); +// tscFreeSqlObj(pSql); +// } } static void tscProcessAsyncError(SSchedMsg *pMsg) { diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 8e6878f449..1ec84f023a 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -708,6 +708,11 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en if (pCtx->order == TSDB_ORDER_DESC) { return BLK_DATA_NO_NEEDED; } + + // not initialized yet, it is the first block, load it. + if (pCtx->aOutputBuf == NULL) { + return BLK_DATA_ALL_NEEDED; + } SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { @@ -721,7 +726,12 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end if (pCtx->order != pCtx->param[0].i64Key) { return BLK_DATA_NO_NEEDED; } - + + // not initialized yet, it is the first block, load it. + if (pCtx->aOutputBuf == NULL) { + return BLK_DATA_ALL_NEEDED; + } + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; @@ -1540,6 +1550,8 @@ static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t in * to decide if the value is earlier than current intermediate result */ static void first_dist_function(SQLFunctionCtx *pCtx) { + assert(pCtx->size > 0); + if (pCtx->size == 0) { return; } @@ -1554,7 +1566,12 @@ static void first_dist_function(SQLFunctionCtx *pCtx) { } int32_t notNullElems = 0; - + + // data block is discard, not loaded, do not need to check it + if (!pCtx->preAggVals.dataBlockLoaded) { + return; + } + // find the first not null value for (int32_t i = 0; i < pCtx->size; ++i) { char *data = GET_INPUT_CHAR_INDEX(pCtx, i); @@ -1575,10 +1592,6 @@ static void first_dist_function(SQLFunctionCtx *pCtx) { } static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { - if (pCtx->size == 0) { - return; - } - char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { return; @@ -1706,10 +1719,6 @@ static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t ind } static void last_dist_function(SQLFunctionCtx *pCtx) { - if (pCtx->size == 0) { - return; - } - /* * 1. for scan data in asc order, no need to check data * 2. 
for data blocks that are not loaded, no need to check data @@ -1717,7 +1726,12 @@ static void last_dist_function(SQLFunctionCtx *pCtx) { if (pCtx->order != pCtx->param[0].i64Key) { return; } - + + // data block is discard, not loaded, do not need to check it + if (!pCtx->preAggVals.dataBlockLoaded) { + return; + } + int32_t notNullElems = 0; for (int32_t i = pCtx->size - 1; i >= 0; --i) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index c4641afbf3..24c78f2534 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1648,6 +1648,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm } pNew->fp = fp; + pNew->fetchFp = fp; pNew->param = param; pNew->maxRetry = TSDB_MAX_REPLICA_NUM; @@ -1803,6 +1804,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void } pNew->fp = fp; + pNew->fetchFp = fp; + pNew->param = param; pNew->maxRetry = TSDB_MAX_REPLICA_NUM; @@ -2041,10 +2044,8 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { // set the callback function pSql->fp = fp; - int32_t ret = tscProcessSql(pSql); - if (ret == TSDB_CODE_SUCCESS) { - return; - } else {// todo check for failure + if (tscProcessSql(pSql) != 0) { + break; } } } diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 6a4b9874d7..e57cb26456 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -125,7 +125,8 @@ typedef struct SArithmeticSupport { } SArithmeticSupport; typedef struct SQLPreAggVal { - bool isSet; + bool isSet; // statistics info set or not + bool dataBlockLoaded; // data block is loaded or not SDataStatis statis; } SQLPreAggVal; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 841e75249f..26b8454594 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1358,6 +1358,8 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY pCtx->preAggVals.isSet = false; } + pCtx->preAggVals.dataBlockLoaded = (inputData != NULL); + // limit/offset query will affect this value pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos:0; pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? 
pBlockInfo->rows - pQuery->pos : pQuery->pos + 1; @@ -1928,7 +1930,7 @@ char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWi pQuery->pSelectExpr[columnIndex].bytes * realRowId; } -#define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_DOUBLE && (_t) != TSDB_DATA_TYPE_FLOAT) +#define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR) static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx, int32_t numOfRows) { @@ -1948,13 +1950,14 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat } } + // no statistics data if (index == -1) { - continue; + return true; } // not support pre-filter operation on binary/nchar data type if (!IS_PREFILTER_TYPE(pFilterInfo->info.type)) { - continue; + return true; } // all points in current column are NULL, no need to check its boundary value @@ -2203,7 +2206,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { summary->totalBlocks += 1; if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -3357,12 +3359,10 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols) cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols); } -#define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \ - do { \ - SQuery *_query = (_runtime)->pQuery; \ - _query->current = _tableInfo; \ - assert((((_tableInfo)->lastKey >= (_tableInfo)->win.skey) && QUERY_IS_ASC_QUERY(_query)) || \ - (((_tableInfo)->lastKey <= (_tableInfo)->win.skey) && !QUERY_IS_ASC_QUERY(_query))); \ +#define CHECK_QUERY_TIME_RANGE(_q, _tableInfo) \ + do { \ + assert((((_tableInfo)->lastKey >= (_tableInfo)->win.skey) && QUERY_IS_ASC_QUERY(_q)) || \ + (((_tableInfo)->lastKey <= (_tableInfo)->win.skey) && !QUERY_IS_ASC_QUERY(_q))); \ } while (0) /** @@ -4212,6 +4212,23 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) { } } +static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTableQueryInfo, SDataBlockInfo* pBlockInfo) { + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { + setExecutionContext(pQInfo, pTableQueryInfo->groupIndex, pBlockInfo->window.ekey + step); + } else { // interval query + TSKEY nextKey = pBlockInfo->window.skey; + setIntervalQueryRange(pQInfo, nextKey); + + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo); + } + } +} + static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery* pQuery = pRuntimeEnv->pQuery; @@ -4226,6 +4243,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; + if (IS_QUERY_KILLED(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -4236,24 +4254,16 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { break; } - assert(*pTableQueryInfo != NULL); - SET_CURRENT_QUERY_TABLE_INFO(pRuntimeEnv, *pTableQueryInfo); + pQuery->current = *pTableQueryInfo; + CHECK_QUERY_TIME_RANGE(pQuery, *pTableQueryInfo); if (!pRuntimeEnv->groupbyNormalCol) { - if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { - 
setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step); - } else { // interval query - TSKEY nextKey = blockInfo.window.skey; - setIntervalQueryRange(pQInfo, nextKey); - - if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { - setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo); - } - } + setEnvForEachBlock(pQInfo, *pTableQueryInfo, &blockInfo); } SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; + if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) { pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step; continue; @@ -4516,7 +4526,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) { while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) { if (IS_QUERY_KILLED(pQInfo)) { - finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5014,6 +5023,7 @@ static void stableQueryImpl(SQInfo *pQInfo) { isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol); sequentialTableProcess(pQInfo); + } // record the total elapsed time @@ -6112,11 +6122,6 @@ _over: //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; if (code != TSDB_CODE_SUCCESS) { *pQInfo = NULL; - } else { -// SQInfo* pq = (SQInfo*) (*pQInfo); - -// T_REF_INC(pq); -// T_REF_INC(pq); } // if failed to add ref for all meters in this query, abort current query diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index c055a27c39..3311a0f13c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1801,7 +1801,8 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta } tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL); - + + // todo opt perf size_t numOfCols = QH_GET_NUM_OF_COLS(pHandle); for(int32_t i = 0; i < numOfCols; ++i) { SDataStatis* st = &pHandle->statis[i]; @@ -1820,6 +1821,13 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows; } + + // todo opt perf + SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i); + if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) { + pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst; + pHandle->statis[i].max = pBlockInfo->compBlock->keyLast; + } } return TSDB_CODE_SUCCESS; From 4918aac1eb4a3501f6429939cfb238484921802b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 13 Jul 2020 10:41:12 +0000 Subject: [PATCH 13/14] adjust max tags number --- src/query/src/qExecutor.c | 2 +- src/tsdb/src/tsdbRead.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index d1b336fb4b..c0cb79a4ef 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5366,7 +5366,7 @@ static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); tExprNode* pExprNode = NULL; - TRY(32) { + TRY(TSDB_MAX_TAGS) { pExprNode = exprTreeFromBinary(pArithExprInfo->base.arg[0].argValue.pz, pArithExprInfo->base.arg[0].argBytes); } CATCH( code ) { CLEANUP_EXECUTE(); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 9e5da903b1..789706506d 100644 --- 
a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2193,7 +2193,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT int32_t ret = TSDB_CODE_SUCCESS; tExprNode* expr = NULL; - TRY(32) { + TRY(TSDB_MAX_TAGS) { expr = exprTreeFromTableName(tbnameCond); if (expr == NULL) { expr = exprTreeFromBinary(pTagCond, len); From 621f842c0fa1f53820fdb127214f42d222c1b41d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Jul 2020 19:27:07 +0800 Subject: [PATCH 14/14] [td-225] fix bugs. --- src/client/src/tscAsync.c | 1 + src/client/src/tscServer.c | 5 ++++- src/client/src/tscUtil.c | 8 ++++---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 2eff96bac9..4e05e1ce4c 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -168,6 +168,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; } + tscProcessSql(pSql); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 1ae12aaf3d..564d5ae23f 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -339,7 +339,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) { } if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? pRes->numOfRows: pRes->code; + rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS)? pRes->numOfRows: pRes->code; bool shouldFree = tscShouldBeFreed(pSql); (*pSql->fp)(pSql->param, pSql, rpcMsg->code); @@ -476,6 +476,8 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t vgIndex = pTableMetaInfo->vgroupIndex; SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList; + assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -549,6 +551,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char assert(index >= 0); if (pTableMetaInfo->vgroupList->numOfVgroups > 0) { + assert(index < pTableMetaInfo->vgroupList->numOfVgroups); pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; } tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 6ec25aedf9..3e0fe0b4be 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2008,7 +2008,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups; - while (++pTableMetaInfo->vgroupIndex < totalVgroups) { + if (++pTableMetaInfo->vgroupIndex < totalVgroups) { tscDebug("%p results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%" PRId64, pSql, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal); @@ -2044,9 +2044,9 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { // set the callback function pSql->fp = fp; - if (tscProcessSql(pSql) != 0) { - break; - } + tscProcessSql(pSql); + } else { + tscDebug("%p try all %d vnodes, query complete. 
current numOfRes:%" PRId64, pSql, totalVgroups, pRes->numOfClauseTotal); } }
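
Reviewer note (commentary, not part of the patch series): taken together, the commits above converge on one convention for asynchronous error handling in the client: record the error code on the result object, hand the object back to the async scheduler (tscQueueAsyncRes), and let the scheduler restore the user callback saved in fetchFp before invoking it — rather than returning the code to a caller that may already have released the SSqlObj (see the "pSql may have been released already" comment in tscHandleInsertRetry). The sketch below is a minimal, self-contained illustration of that convention under stated assumptions; every name in it (sql_obj_t, queue_async_res, handle_retry, user_cb) is hypothetical and it is not the TDengine API itself.

#include <stdio.h>

typedef void (*result_cb_t)(void *param, void *res, int code);

typedef struct sql_obj_t {
    int          code;      /* mirrors pRes->code                      */
    result_cb_t  fp;        /* callback used for the current step      */
    result_cb_t  fetch_fp;  /* user callback saved at submission time  */
    void        *param;     /* user context                            */
} sql_obj_t;

/* Analogue of the tscQueueAsyncRes path: restore the user-facing callback
 * from the saved fetch_fp and dispatch it with the recorded error code
 * (done synchronously here only for brevity). */
static void queue_async_res(sql_obj_t *sql) {
    sql->fp = sql->fetch_fp;          /* restore the callback, as the patches do with fetchFp */
    sql->fp(sql->param, sql, sql->code);
}

/* Analogue of a retry path such as tscHandleInsertRetry: on failure it does
 * not hand the error back up the call chain; it records the code and routes
 * the object through the single async error path. */
static int handle_retry(sql_obj_t *sql, int step_result) {
    if (step_result != 0) {
        sql->code = step_result;
        queue_async_res(sql);         /* the object may be released after this point */
        return step_result;
    }
    return 0;                         /* success: continue normal processing */
}

static void user_cb(void *param, void *res, int code) {
    (void)param;
    (void)res;
    printf("user callback fired, code:%d\n", code);
}

int main(void) {
    sql_obj_t sql = { .code = 0, .fp = user_cb, .fetch_fp = user_cb, .param = NULL };
    handle_retry(&sql, -1);           /* simulate a failed sub-step */
    return 0;
}

The point of the single error path is that once a failure is queued, nothing else touches the object: all cleanup and user notification happen in one place, which is what removes the double-callback and use-after-free windows these commits are closing.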