Merge remote-tracking branch 'origin/develop' into feature/crash_gen

Steven Li 2020-07-14 21:46:10 +00:00
commit 2f8714e5cf
152 changed files with 4091 additions and 1300 deletions

View File

@ -51,18 +51,15 @@
# number of threads per CPU core # number of threads per CPU core
# numOfThreadsPerCore 1.0 # numOfThreadsPerCore 1.0
# number of vnodes per core in DNode # number of vgroups per db
# numOfVnodesPerCore 8 # maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# the ratio of threads responsible for querying in the total thread # the ratio of threads responsible for querying in the total thread
# ratioOfQueryThreads 0.5 # ratioOfQueryThreads 0.5
# number of total vnodes in DNode
# numOfTotalVnodes 0
# max number of tables per vnode
# maxtablesPerVnode 1000
# interval of check load balance when the management node is in normal operation # interval of check load balance when the management node is in normal operation
# balanceInterval 300 # balanceInterval 300

View File

@ -415,7 +415,7 @@ int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
//void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column); //void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column);
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
SFieldSupInfo* pInfo = TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex);
assert(pInfo->pSqlExpr != NULL); assert(pInfo->pSqlExpr != NULL);
int32_t type = pInfo->pSqlExpr->resType; int32_t type = pInfo->pSqlExpr->resType;

View File

@ -43,8 +43,9 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->signature = pSql; pSql->signature = pSql;
pSql->param = param; pSql->param = param;
pSql->pTscObj = pObj; pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM; pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = fp; pSql->fp = fp;
pSql->fetchFp = fp;
pSql->sqlstr = calloc(1, sqlLen + 1); pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) { if (pSql->sqlstr == NULL) {
@ -159,7 +160,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
pRes->code = numOfRows; pRes->code = numOfRows;
} }
tscQueueAsyncError(pSql->fetchFp, param, pRes->code); tscQueueAsyncRes(pSql);
return; return;
} }
@ -167,6 +168,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) { if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
} }
tscProcessSql(pSql); tscProcessSql(pSql);
} }
@ -196,6 +198,10 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
// user-defined callback function is stored in fetchFp
pSql->fetchFp = fp;
pSql->fp = tscAsyncFetchRowsProxy;
if (pRes->qhandle == 0) { if (pRes->qhandle == 0) {
tscError("qhandle is NULL"); tscError("qhandle is NULL");
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE; pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
@ -203,10 +209,6 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
return; return;
} }
// user-defined callback function is stored in fetchFp
pSql->fetchFp = fp;
pSql->fp = tscAsyncFetchRowsProxy;
pSql->param = param; pSql->param = param;
tscResetForNextRetrieve(pRes); tscResetForNextRetrieve(pRes);
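
Context for the hunk above: the user callback is now stored in fetchFp (and fp set to tscAsyncFetchRowsProxy) before the qhandle check, so even the early error return is reported through the saved callback. The ordering concern in isolation, as a minimal sketch (the demo_* names and struct are invented for illustration and are not the TDengine types):

    #include <stdio.h>

    typedef void (*demo_cb_t)(void *param, void *res, int num);

    typedef struct demo_sql {
        demo_cb_t fp;       /* callback used by the async machinery */
        demo_cb_t fetchFp;  /* user callback, preserved for later fetches and retries */
        void     *param;
        long      qhandle;
    } demo_sql_t;

    /* internal proxy: would post-process rows, then hand control back to the user callback */
    static void demo_proxy(void *param, void *res, int num) {
        demo_sql_t *sql = (demo_sql_t *)res;
        sql->fetchFp(param, res, num);
    }

    static void demo_fetch_rows_a(demo_sql_t *sql, demo_cb_t fp, void *param) {
        /* store the user callback before any early return, mirroring the reordering above */
        sql->fetchFp = fp;
        sql->fp      = demo_proxy;
        sql->param   = param;

        if (sql->qhandle == 0) {           /* invalid handle: error path */
            sql->fp(sql->param, sql, -1);  /* the user is still notified, via the proxy */
            return;
        }
        /* ... the normal async fetch would be scheduled here ... */
    }

    static void user_cb(void *param, void *res, int num) {
        (void)param; (void)res;
        printf("fetch finished, rows or error code: %d\n", num);
    }

    int main(void) {
        demo_sql_t sql = {0};              /* qhandle == 0 exercises the error path */
        demo_fetch_rows_a(&sql, user_cb, NULL);
        return 0;
    }
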
@ -346,31 +348,32 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
void tscProcessAsyncRes(SSchedMsg *pMsg) { void tscProcessAsyncRes(SSchedMsg *pMsg) {
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
SSqlCmd *pCmd = &pSql->cmd; // SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
void *taosres = pSql; // void *taosres = pSql;
// pCmd may be released, so cache pCmd->command // pCmd may be released, so cache pCmd->command
int cmd = pCmd->command; // int cmd = pCmd->command;
int code = pRes->code; // int code = pRes->code;
// in case of async insert, restore the user specified callback function // in case of async insert, restore the user specified callback function
bool shouldFree = tscShouldBeFreed(pSql); // bool shouldFree = tscShouldBeFreed(pSql);
if (cmd == TSDB_SQL_INSERT) { // if (pCmd->command == TSDB_SQL_INSERT) {
assert(pSql->fp != NULL); // assert(pSql->fp != NULL);
assert(pSql->fp != NULL && pSql->fetchFp != NULL);
// }
// if (pSql->fp) {
pSql->fp = pSql->fetchFp; pSql->fp = pSql->fetchFp;
} (*pSql->fp)(pSql->param, pSql, pRes->code);
// }
if (pSql->fp) { // if (shouldFree) {
(*pSql->fp)(pSql->param, taosres, code); // tscDebug("%p sqlObj is automatically freed in async res", pSql);
} // tscFreeSqlObj(pSql);
// }
if (shouldFree) {
tscDebug("%p sqlObj is automatically freed in async res", pSql);
tscFreeSqlObj(pSql);
}
} }
static void tscProcessAsyncError(SSchedMsg *pMsg) { static void tscProcessAsyncError(SSchedMsg *pMsg) {
@ -420,14 +423,14 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
pRes->code = code;
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
pRes->code = code; tscError("%p get tableMeta failed, code:%s", pSql, tstrerror(code));
tscQueueAsyncRes(pSql); goto _error;
return; } else {
}
tscDebug("%p get tableMeta successfully", pSql); tscDebug("%p get tableMeta successfully", pSql);
}
if (pSql->pStream == NULL) { if (pSql->pStream == NULL) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
@ -453,11 +456,9 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL);
if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) { // tscProcessSql can add error into async res
tscProcessSql(pSql);
return; return;
}
goto _error;
} else { // continue to process normal async query } else { // continue to process normal async query
if (pCmd->parseFinished) { if (pCmd->parseFinished) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding query", pSql); tscDebug("%p update table meta in local cache, continue to process sql and send corresponding query", pSql);
@ -481,26 +482,21 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return; return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
} }
if (code == TSDB_CODE_SUCCESS) {
/* /*
* Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks, * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks,
* and send the required submit block according to index value in supporter to server. * and send the required submit block according to index value in supporter to server.
*/ */
pSql->fp = pSql->fetchFp; // restore the fp pSql->fp = pSql->fetchFp; // restore the fp
if ((code = tscHandleInsertRetry(pSql)) == TSDB_CODE_SUCCESS) { tscHandleInsertRetry(pSql);
return;
}
}
} else {// in case of other query type, continue } else {// in case of other query type, continue
if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) { tscProcessSql(pSql);
return;
}
} }
goto _error; return;
} else { } else {
tscDebug("%p continue parse sql after get table meta", pSql); tscDebug("%p continue parse sql after get table meta", pSql);
@ -538,7 +534,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
goto _error; goto _error;
} }
if (code == TSDB_CODE_SUCCESS && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex); code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return; return;

View File

@ -330,10 +330,6 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
bool stableQueryFunctChanged(int32_t funcId) {
return (aAggs[funcId].stableFuncId != funcId);
}
/** /**
* the numOfRes should be kept, since it may be used later * the numOfRes should be kept, since it may be used later
* and allow the ResultInfo to be re initialized * and allow the ResultInfo to be re initialized
@ -361,7 +357,6 @@ static bool function_setup(SQLFunctionCtx *pCtx) {
} }
memset(pCtx->aOutputBuf, 0, (size_t)pCtx->outputBytes); memset(pCtx->aOutputBuf, 0, (size_t)pCtx->outputBytes);
initResultInfo(pResInfo); initResultInfo(pResInfo);
return true; return true;
} }
@ -675,16 +670,16 @@ static void sum_func_second_merge(SQLFunctionCtx *pCtx) {
} }
} }
static int32_t precal_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { static int32_t statisRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
return BLK_DATA_STATIS_NEEDED; return BLK_DATA_STATIS_NEEDED;
} }
static int32_t data_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { static int32_t dataBlockRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
return BLK_DATA_ALL_NEEDED; return BLK_DATA_ALL_NEEDED;
} }
// todo: if column in current data block are null, opt for this case // todo: if column in current data block are null, opt for this case
static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
if (pCtx->order == TSDB_ORDER_DESC) { if (pCtx->order == TSDB_ORDER_DESC) {
return BLK_DATA_NO_NEEDED; return BLK_DATA_NO_NEEDED;
} }
@ -697,7 +692,7 @@ static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end,
} }
} }
static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
if (pCtx->order != pCtx->param[0].i64Key) { if (pCtx->order != pCtx->param[0].i64Key) {
return BLK_DATA_NO_NEEDED; return BLK_DATA_NO_NEEDED;
} }
@ -709,34 +704,40 @@ static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end,
} }
} }
static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
if (pCtx->order == TSDB_ORDER_DESC) { if (pCtx->order == TSDB_ORDER_DESC) {
return BLK_DATA_NO_NEEDED; return BLK_DATA_NO_NEEDED;
} }
// result buffer has not been set yet. // not initialized yet, it is the first block, load it.
if (pCtx->aOutputBuf == NULL) {
return BLK_DATA_ALL_NEEDED; return BLK_DATA_ALL_NEEDED;
//todo optimize the filter info }
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) { SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// return BLK_DATA_ALL_NEEDED; if (pInfo->hasResult != DATA_SET_FLAG) {
// } else { // data in current block is not earlier than current result return BLK_DATA_ALL_NEEDED;
// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; } else { // data in current block is not earlier than current result
// } return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
} }
static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
if (pCtx->order != pCtx->param[0].i64Key) { if (pCtx->order != pCtx->param[0].i64Key) {
return BLK_DATA_NO_NEEDED; return BLK_DATA_NO_NEEDED;
} }
// not initialized yet, it is the first block, load it.
if (pCtx->aOutputBuf == NULL) {
return BLK_DATA_ALL_NEEDED; return BLK_DATA_ALL_NEEDED;
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); }
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED; SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// } else { if (pInfo->hasResult != DATA_SET_FLAG) {
// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; return BLK_DATA_ALL_NEEDED;
// } } else {
return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
} }
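
The firstDistFuncRequired/lastDistFuncRequired rewrites above restore the pruning that was previously commented out: once an SFirstLastInfo intermediate result exists, a block whose time range cannot produce an earlier (FIRST) or later (LAST) row is not loaded. A compact sketch of the FIRST-side test under simplified, made-up field names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t demo_tskey_t;

    typedef struct {
        demo_tskey_t ts;        /* timestamp of the earliest row recorded so far */
        bool         hasResult; /* whether any row has been recorded yet */
    } demo_first_info_t;

    /* Should a block be loaded for FIRST()? blockStart is the block's smallest timestamp. */
    static bool demo_first_block_needed(const demo_first_info_t *info, demo_tskey_t blockStart) {
        if (info == NULL || !info->hasResult) {
            return true;               /* nothing recorded yet: the block must be loaded */
        }
        return blockStart < info->ts;  /* useful only if it can contain an earlier row */
    }

    int main(void) {
        demo_first_info_t info = { .ts = 1000, .hasResult = true };
        printf("%d\n", demo_first_block_needed(&info, 500));   /* 1: may hold an earlier row */
        printf("%d\n", demo_first_block_needed(&info, 2000));  /* 0: block can be skipped */
        return 0;
    }
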
////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////
@ -1549,6 +1550,8 @@ static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t in
* to decide if the value is earlier than current intermediate result * to decide if the value is earlier than current intermediate result
*/ */
static void first_dist_function(SQLFunctionCtx *pCtx) { static void first_dist_function(SQLFunctionCtx *pCtx) {
assert(pCtx->size > 0);
if (pCtx->size == 0) { if (pCtx->size == 0) {
return; return;
} }
@ -1564,6 +1567,11 @@ static void first_dist_function(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0; int32_t notNullElems = 0;
// data block is discarded, not loaded, no need to check it
if (!pCtx->preAggVals.dataBlockLoaded) {
return;
}
// find the first not null value // find the first not null value
for (int32_t i = 0; i < pCtx->size; ++i) { for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i); char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
@ -1584,10 +1592,6 @@ static void first_dist_function(SQLFunctionCtx *pCtx) {
} }
static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
if (pCtx->size == 0) {
return;
}
char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); char *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return; return;
@ -1715,10 +1719,6 @@ static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t ind
} }
static void last_dist_function(SQLFunctionCtx *pCtx) { static void last_dist_function(SQLFunctionCtx *pCtx) {
if (pCtx->size == 0) {
return;
}
/* /*
* 1. for scan data in asc order, no need to check data * 1. for scan data in asc order, no need to check data
* 2. for data blocks that are not loaded, no need to check data * 2. for data blocks that are not loaded, no need to check data
@ -1727,6 +1727,11 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
return; return;
} }
// data block is discarded, not loaded, no need to check it
if (!pCtx->preAggVals.dataBlockLoaded) {
return;
}
int32_t notNullElems = 0; int32_t notNullElems = 0;
for (int32_t i = pCtx->size - 1; i >= 0; --i) { for (int32_t i = pCtx->size - 1; i >= 0; --i) {
@ -2123,55 +2128,6 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
tfree(pData); tfree(pData);
} }
bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval) {
STopBotInfo *pTopBotInfo = (STopBotInfo *)GET_RES_INFO(pCtx)->interResultBuf;
int32_t numOfExistsRes = pTopBotInfo->num;
// required number of results are not reached, continue load data block
if (numOfExistsRes < pCtx->param[0].i64Key) {
return true;
}
tValuePair *pRes = (tValuePair*) pTopBotInfo->res;
if (functionId == TSDB_FUNC_TOP) {
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
return GET_INT8_VAL(maxval) > pRes[0].v.i64Key;
case TSDB_DATA_TYPE_SMALLINT:
return GET_INT16_VAL(maxval) > pRes[0].v.i64Key;
case TSDB_DATA_TYPE_INT:
return GET_INT32_VAL(maxval) > pRes[0].v.i64Key;
case TSDB_DATA_TYPE_BIGINT:
return GET_INT64_VAL(maxval) > pRes[0].v.i64Key;
case TSDB_DATA_TYPE_FLOAT:
return GET_FLOAT_VAL(maxval) > pRes[0].v.dKey;
case TSDB_DATA_TYPE_DOUBLE:
return GET_DOUBLE_VAL(maxval) > pRes[0].v.dKey;
default:
return true;
}
} else {
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
return GET_INT8_VAL(minval) < pRes[0].v.i64Key;
case TSDB_DATA_TYPE_SMALLINT:
return GET_INT16_VAL(minval) < pRes[0].v.i64Key;
case TSDB_DATA_TYPE_INT:
return GET_INT32_VAL(minval) < pRes[0].v.i64Key;
case TSDB_DATA_TYPE_BIGINT:
return GET_INT64_VAL(minval) < pRes[0].v.i64Key;
case TSDB_DATA_TYPE_FLOAT:
return GET_FLOAT_VAL(minval) < pRes[0].v.dKey;
case TSDB_DATA_TYPE_DOUBLE:
return GET_DOUBLE_VAL(minval) < pRes[0].v.dKey;
default:
return true;
}
}
}
/* /*
* Parameters values: * Parameters values:
* 1. param[0]: maximum allowable results * 1. param[0]: maximum allowable results
@ -2191,6 +2147,53 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
} }
} }
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval) {
STopBotInfo *pTopBotInfo = getTopBotOutputInfo(pCtx);
// required number of results not reached yet, continue loading data blocks
if (pTopBotInfo->num < pCtx->param[0].i64Key) {
return true;
}
tValuePair **pRes = (tValuePair**) pTopBotInfo->res;
if (functionId == TSDB_FUNC_TOP) {
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
return GET_INT8_VAL(maxval) > pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_SMALLINT:
return GET_INT16_VAL(maxval) > pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_INT:
return GET_INT32_VAL(maxval) > pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_BIGINT:
return GET_INT64_VAL(maxval) > pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_FLOAT:
return GET_FLOAT_VAL(maxval) > pRes[0]->v.dKey;
case TSDB_DATA_TYPE_DOUBLE:
return GET_DOUBLE_VAL(maxval) > pRes[0]->v.dKey;
default:
return true;
}
} else {
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
return GET_INT8_VAL(minval) < pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_SMALLINT:
return GET_INT16_VAL(minval) < pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_INT:
return GET_INT32_VAL(minval) < pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_BIGINT:
return GET_INT64_VAL(minval) < pRes[0]->v.i64Key;
case TSDB_DATA_TYPE_FLOAT:
return GET_FLOAT_VAL(minval) < pRes[0]->v.dKey;
case TSDB_DATA_TYPE_DOUBLE:
return GET_DOUBLE_VAL(minval) < pRes[0]->v.dKey;
default:
return true;
}
}
}
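
The topbot_datablock_filter added above (now reading the result as an array of tValuePair pointers obtained via getTopBotOutputInfo) skips a block when its pre-aggregated min/max cannot beat the worst value already kept for TOP/BOTTOM. The same pruning idea reduced to a top-k over int64 values, as a hypothetical helper rather than the aAggs entry itself:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decide whether a block can contribute to TOP(k).
     * kept[0] is assumed to be the smallest value currently retained (the eviction
     * candidate); blockMax is the block's pre-computed maximum. */
    static bool demo_top_block_needed(const int64_t *kept, int32_t numKept,
                                      int32_t k, int64_t blockMax) {
        if (numKept < k) {
            return true;            /* result set not full yet: every block must be loaded */
        }
        return blockMax > kept[0];  /* load only if the block can beat the worst kept value */
    }

    int main(void) {
        int64_t kept[3] = {40, 55, 90};   /* current TOP(3) candidates, smallest first */
        printf("%d\n", demo_top_block_needed(kept, 3, 3, 35));  /* 0: block cannot improve the result */
        printf("%d\n", demo_top_block_needed(kept, 3, 3, 70));  /* 1: block may contain a larger value */
        return 0;
    }
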
/* /*
* keep the intermediate results during scan data blocks in the format of: * keep the intermediate results during scan data blocks in the format of:
* +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+ * +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+
@ -3376,7 +3379,7 @@ static void spread_function(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx); SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SSpreadInfo *pInfo = pResInfo->interResultBuf; SSpreadInfo *pInfo = pResInfo->interResultBuf;
int32_t numOfElems = pCtx->size; int32_t numOfElems = 0;
// todo : opt with pre-calculated result // todo : opt with pre-calculated result
// column missing cause the hasNull to be true // column missing cause the hasNull to be true
@ -4412,7 +4415,7 @@ static void sumrate_finalizer(SQLFunctionCtx *pCtx) {
* e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last... * e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last...
* *
*/ */
int32_t funcCompatDefList[] = { int32_t functionCompatList[] = {
// count, sum, avg, min, max, stddev, percentile, apercentile, first, last // count, sum, avg, min, max, stddev, percentile, apercentile, first, last
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
// last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z // last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z
@ -4451,7 +4454,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
sum_func_merge, sum_func_merge,
sum_func_second_merge, sum_func_second_merge,
precal_req_load_info, statisRequired,
}, },
{ {
// 2 // 2
@ -4466,7 +4469,7 @@ SQLAggFuncElem aAggs[] = {{
avg_finalizer, avg_finalizer,
avg_func_merge, avg_func_merge,
avg_func_second_merge, avg_func_second_merge,
precal_req_load_info, statisRequired,
}, },
{ {
// 3 // 3
@ -4481,7 +4484,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
min_func_merge, min_func_merge,
min_func_second_merge, min_func_second_merge,
precal_req_load_info, statisRequired,
}, },
{ {
// 4 // 4
@ -4496,7 +4499,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
max_func_merge, max_func_merge,
max_func_second_merge, max_func_second_merge,
precal_req_load_info, statisRequired,
}, },
{ {
// 5 // 5
@ -4511,7 +4514,7 @@ SQLAggFuncElem aAggs[] = {{
stddev_finalizer, stddev_finalizer,
noop1, noop1,
noop1, noop1,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 6 // 6
@ -4526,7 +4529,7 @@ SQLAggFuncElem aAggs[] = {{
percentile_finalizer, percentile_finalizer,
noop1, noop1,
noop1, noop1,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 7 // 7
@ -4541,7 +4544,7 @@ SQLAggFuncElem aAggs[] = {{
apercentile_finalizer, apercentile_finalizer,
apercentile_func_merge, apercentile_func_merge,
apercentile_func_second_merge, apercentile_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 8 // 8
@ -4556,7 +4559,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
noop1, noop1,
noop1, noop1,
first_data_req_info, firstFuncRequired,
}, },
{ {
// 9 // 9
@ -4571,7 +4574,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
noop1, noop1,
noop1, noop1,
last_data_req_info, lastFuncRequired,
}, },
{ {
// 10 // 10
@ -4587,7 +4590,7 @@ SQLAggFuncElem aAggs[] = {{
last_row_finalizer, last_row_finalizer,
noop1, noop1,
last_dist_func_second_merge, last_dist_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 11 // 11
@ -4603,7 +4606,7 @@ SQLAggFuncElem aAggs[] = {{
top_bottom_func_finalizer, top_bottom_func_finalizer,
top_func_merge, top_func_merge,
top_func_second_merge, top_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 12 // 12
@ -4619,7 +4622,7 @@ SQLAggFuncElem aAggs[] = {{
top_bottom_func_finalizer, top_bottom_func_finalizer,
bottom_func_merge, bottom_func_merge,
bottom_func_second_merge, bottom_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 13 // 13
@ -4649,7 +4652,7 @@ SQLAggFuncElem aAggs[] = {{
twa_function_finalizer, twa_function_finalizer,
twa_func_merge, twa_func_merge,
twa_function_copy, twa_function_copy,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 15 // 15
@ -4664,7 +4667,7 @@ SQLAggFuncElem aAggs[] = {{
leastsquares_finalizer, leastsquares_finalizer,
noop1, noop1,
noop1, noop1,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 16 // 16
@ -4694,7 +4697,7 @@ SQLAggFuncElem aAggs[] = {{
doFinalizer, doFinalizer,
copy_function, copy_function,
copy_function, copy_function,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 18 // 18
@ -4724,7 +4727,7 @@ SQLAggFuncElem aAggs[] = {{
ts_comp_finalize, ts_comp_finalize,
copy_function, copy_function,
copy_function, copy_function,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 20 // 20
@ -4754,7 +4757,7 @@ SQLAggFuncElem aAggs[] = {{
doFinalizer, doFinalizer,
copy_function, copy_function,
copy_function, copy_function,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 22, multi-output, tag function has only one result // 22, multi-output, tag function has only one result
@ -4784,7 +4787,7 @@ SQLAggFuncElem aAggs[] = {{
doFinalizer, doFinalizer,
copy_function, copy_function,
copy_function, copy_function,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 24 // 24
@ -4799,7 +4802,7 @@ SQLAggFuncElem aAggs[] = {{
doFinalizer, doFinalizer,
noop1, noop1,
noop1, noop1,
data_req_load_info, dataBlockRequired,
}, },
// distributed version used in two-stage aggregation processes // distributed version used in two-stage aggregation processes
{ {
@ -4815,7 +4818,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
first_dist_func_merge, first_dist_func_merge,
first_dist_func_second_merge, first_dist_func_second_merge,
first_dist_data_req_info, firstDistFuncRequired,
}, },
{ {
// 26 // 26
@ -4830,7 +4833,7 @@ SQLAggFuncElem aAggs[] = {{
function_finalizer, function_finalizer,
last_dist_func_merge, last_dist_func_merge,
last_dist_func_second_merge, last_dist_func_second_merge,
last_dist_data_req_info, lastDistFuncRequired,
}, },
{ {
// 27 // 27
@ -4845,7 +4848,7 @@ SQLAggFuncElem aAggs[] = {{
doFinalizer, doFinalizer,
noop1, noop1,
copy_function, copy_function,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 28 // 28
@ -4860,7 +4863,7 @@ SQLAggFuncElem aAggs[] = {{
rate_finalizer, rate_finalizer,
rate_func_merge, rate_func_merge,
rate_func_copy, rate_func_copy,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 29 // 29
@ -4875,7 +4878,7 @@ SQLAggFuncElem aAggs[] = {{
rate_finalizer, rate_finalizer,
rate_func_merge, rate_func_merge,
rate_func_copy, rate_func_copy,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 30 // 30
@ -4890,7 +4893,7 @@ SQLAggFuncElem aAggs[] = {{
sumrate_finalizer, sumrate_finalizer,
sumrate_func_merge, sumrate_func_merge,
sumrate_func_second_merge, sumrate_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 31 // 31
@ -4905,7 +4908,7 @@ SQLAggFuncElem aAggs[] = {{
sumrate_finalizer, sumrate_finalizer,
sumrate_func_merge, sumrate_func_merge,
sumrate_func_second_merge, sumrate_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 32 // 32
@ -4920,7 +4923,7 @@ SQLAggFuncElem aAggs[] = {{
sumrate_finalizer, sumrate_finalizer,
sumrate_func_merge, sumrate_func_merge,
sumrate_func_second_merge, sumrate_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 33 // 33
@ -4935,7 +4938,7 @@ SQLAggFuncElem aAggs[] = {{
sumrate_finalizer, sumrate_finalizer,
sumrate_func_merge, sumrate_func_merge,
sumrate_func_second_merge, sumrate_func_second_merge,
data_req_load_info, dataBlockRequired,
}, },
{ {
// 34 // 34
@ -4950,5 +4953,5 @@ SQLAggFuncElem aAggs[] = {{
noop1, noop1,
noop1, noop1,
noop1, noop1,
data_req_load_info, dataBlockRequired,
}}; }};

View File

@ -497,7 +497,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
tsem_init(&pSql->rspSem, 0, 0); tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql; pSql->signature = pSql;
pSql->pTscObj = pObj; pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM; pSql->maxRetry = TSDB_MAX_REPLICA;
pStmt->pSql = pSql; pStmt->pSql = pSql;
return pStmt; return pStmt;

View File

@ -2471,7 +2471,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) {
startIdx++; startIdx++;
} }
int32_t factor = funcCompatDefList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId];
// diff function cannot be executed with other function // diff function cannot be executed with other function
// arithmetic function can be executed with other arithmetic functions // arithmetic function can be executed with other arithmetic functions
@ -2489,7 +2489,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo) {
continue; continue;
} }
if (funcCompatDefList[functionId] != factor) { if (functionCompatList[functionId] != factor) {
return false; return false;
} }
} }
@ -5489,9 +5489,9 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) {
} }
if (pCreate->replications != -1 && if (pCreate->replications != -1 &&
(pCreate->replications < TSDB_MIN_REPLICA_NUM || pCreate->replications > TSDB_MAX_REPLICA_NUM)) { (pCreate->replications < TSDB_MIN_DB_REPLICA_OPTION || pCreate->replications > TSDB_MAX_DB_REPLICA_OPTION)) {
snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications, snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications,
TSDB_MIN_REPLICA_NUM, TSDB_MAX_REPLICA_NUM); TSDB_MIN_DB_REPLICA_OPTION, TSDB_MAX_DB_REPLICA_OPTION);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
} }

View File

@ -339,7 +339,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
} }
if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? pRes->numOfRows: pRes->code; rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS)? pRes->numOfRows: pRes->code;
bool shouldFree = tscShouldBeFreed(pSql); bool shouldFree = tscShouldBeFreed(pSql);
(*pSql->fp)(pSql->param, pSql, rpcMsg->code); (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
@ -412,7 +412,7 @@ int tscProcessSql(SSqlObj *pSql) {
return pSql->res.code; return pSql->res.code;
} }
} else if (pCmd->command < TSDB_SQL_LOCAL) { } else if (pCmd->command < TSDB_SQL_LOCAL) {
pSql->ipList = tscMgmtIpSet; //? pSql->ipList = tscMgmtIpSet;
} else { // local handler } else { // local handler
return (*tscProcessMsgRsp[pCmd->command])(pSql); return (*tscProcessMsgRsp[pCmd->command])(pSql);
} }
@ -476,6 +476,8 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t vgIndex = pTableMetaInfo->vgroupIndex; int32_t vgIndex = pTableMetaInfo->vgroupIndex;
SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList; SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList;
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
} else { } else {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@ -549,6 +551,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
assert(index >= 0); assert(index >= 0);
if (pTableMetaInfo->vgroupList->numOfVgroups > 0) { if (pTableMetaInfo->vgroupList->numOfVgroups > 0) {
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
} }
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups); tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups);
@ -1372,7 +1375,6 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pRes->code = TSDB_CODE_SUCCESS; pRes->code = TSDB_CODE_SUCCESS;
if (pRes->rspType == 0) { if (pRes->rspType == 0) {
pRes->numOfRows = numOfRes; pRes->numOfRows = numOfRes;
pRes->row = 0; pRes->row = 0;

View File

@ -113,7 +113,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
pSql->pTscObj = pObj; pSql->pTscObj = pObj;
pSql->signature = pSql; pSql->signature = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM; pSql->maxRetry = TSDB_MAX_REPLICA;
tsem_init(&pSql->rspSem, 0, 0); tsem_init(&pSql->rspSem, 0, 0);
pObj->pDnodeConn = pDnodeConn; pObj->pDnodeConn = pDnodeConn;
@ -484,22 +484,11 @@ static bool tscFreeQhandleInVnode(SSqlObj* pSql) {
pCmd->command == TSDB_SQL_SHOW || pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH) && pCmd->command == TSDB_SQL_FETCH) &&
(pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) { (pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s", pSql, sqlCmd[pCmd->command]); tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s, ", pSql, sqlCmd[pCmd->command]);
tscProcessSql(pSql); tscProcessSql(pSql);
// in case of sync model query, waits for response and then goes on
// if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) {
// sem_wait(&pSql->rspSem);
// tscFreeSqlObj(pSql);
// tscDebug("%p sqlObj is freed by app", pSql);
// } else {
tscDebug("%p sqlObj will be freed while rsp received", pSql);
// }
return true; return true;
} }

View File

@ -107,7 +107,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
pSql->signature = pSql; pSql->signature = pSql;
pSql->param = pSql; pSql->param = pSql;
pSql->pTscObj = pObj; pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM; pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = asyncCallback; pSql->fp = asyncCallback;
int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);

View File

@ -550,7 +550,7 @@ static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1,
return true; return true;
} }
static void getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) { static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) {
tscDebug("%p all subqueries retrieve <tid, tags> complete, do tags match", pParentSql); tscDebug("%p all subqueries retrieve <tid, tags> complete, do tags match", pParentSql);
SJoinSupporter* p1 = pParentSql->pSubs[0]->param; SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
@ -568,10 +568,7 @@ static void getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParent
*s2 = taosArrayInit(p2->num, p2->tagSize); *s2 = taosArrayInit(p2->num, p2->tagSize);
if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) { if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) {
freeJoinSubqueryObj(pParentSql); return TSDB_CODE_QRY_DUP_JOIN_KEY;
pParentSql->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY;
tscQueueAsyncRes(pParentSql);
return;
} }
int32_t i = 0, j = 0; int32_t i = 0, j = 0;
@ -594,6 +591,8 @@ static void getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParent
i++; i++;
} }
} }
return TSDB_CODE_SUCCESS;
} }
static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) { static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
@ -680,7 +679,14 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
} }
SArray *s1 = NULL, *s2 = NULL; SArray *s1 = NULL, *s2 = NULL;
getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2); int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
if (code != TSDB_CODE_SUCCESS) {
freeJoinSubqueryObj(pParentSql);
pParentSql->res.code = code;
tscQueueAsyncRes(pParentSql);
return;
}
if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return. if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return.
tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql);
freeJoinSubqueryObj(pParentSql); freeJoinSubqueryObj(pParentSql);
@ -1778,7 +1784,6 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu
pSql->pSubs[trsupport->subqueryIndex] = pNew; pSql->pSubs[trsupport->subqueryIndex] = pNew;
} }
printf("------------alloc:%p\n", pNew);
return pNew; return pNew;
} }
@ -1890,9 +1895,11 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql) {
assert(pSupporter->index < pSupporter->pState->numOfTotal); assert(pSupporter->index < pSupporter->pState->numOfTotal);
STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, pSupporter->index); STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, pSupporter->index);
pRes->code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock);
if (pRes->code != TSDB_CODE_SUCCESS) {
return pRes->code; if ((pRes->code = code) != TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pSql);
return code; // here the pSql may have been released already.
} }
return tscProcessSql(pSql); return tscProcessSql(pSql);

View File

@ -1648,8 +1648,9 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
} }
pNew->fp = fp; pNew->fp = fp;
pNew->fetchFp = fp;
pNew->param = param; pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA_NUM; pNew->maxRetry = TSDB_MAX_REPLICA;
pNew->sqlstr = strdup(pSql->sqlstr); pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) { if (pNew->sqlstr == NULL) {
@ -1803,8 +1804,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
} }
pNew->fp = fp; pNew->fp = fp;
pNew->fetchFp = fp;
pNew->param = param; pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA_NUM; pNew->maxRetry = TSDB_MAX_REPLICA;
char* name = pTableMetaInfo->name; char* name = pTableMetaInfo->name;
STableMetaInfo* pFinalInfo = NULL; STableMetaInfo* pFinalInfo = NULL;
@ -2005,7 +2008,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups; int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
while (++pTableMetaInfo->vgroupIndex < totalVgroups) { if (++pTableMetaInfo->vgroupIndex < totalVgroups) {
tscDebug("%p results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%" PRId64, pSql, tscDebug("%p results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%" PRId64, pSql,
pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal); pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal);
@ -2041,11 +2044,9 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
// set the callback function // set the callback function
pSql->fp = fp; pSql->fp = fp;
int32_t ret = tscProcessSql(pSql); tscProcessSql(pSql);
if (ret == TSDB_CODE_SUCCESS) { } else {
return; tscDebug("%p try all %d vnodes, query complete. current numOfRes:%" PRId64, pSql, totalVgroups, pRes->numOfClauseTotal);
} else {// todo check for failure
}
} }
} }

View File

@ -68,7 +68,10 @@ extern int64_t tsMaxRetentWindow;
// db parameters in client // db parameters in client
extern int32_t tsCacheBlockSize; extern int32_t tsCacheBlockSize;
extern int32_t tsBlocksPerVnode; extern int32_t tsBlocksPerVnode;
extern int32_t tsMinTablePerVnode;
extern int32_t tsMaxTablePerVnode; extern int32_t tsMaxTablePerVnode;
extern int32_t tsTableIncStepPerVnode;
extern int32_t tsMaxVgroupsPerDb;
extern int16_t tsDaysPerFile; extern int16_t tsDaysPerFile;
extern int32_t tsDaysToKeep; extern int32_t tsDaysToKeep;
extern int32_t tsMinRowsInFileBlock; extern int32_t tsMinRowsInFileBlock;

View File

@ -38,7 +38,7 @@ uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035]
uint16_t tsDnodeDnodePort = 6035; // udp/tcp uint16_t tsDnodeDnodePort = 6035; // udp/tcp
uint16_t tsSyncPort = 6040; uint16_t tsSyncPort = 6040;
int32_t tsStatusInterval = 1; // second int32_t tsStatusInterval = 1; // second
int16_t tsNumOfVnodesPerCore = 8; int16_t tsNumOfVnodesPerCore = 32;
int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM; int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
int32_t tsNumOfMnodes = 3; int32_t tsNumOfMnodes = 3;
int32_t tsEnableVnodeBak = 1; int32_t tsEnableVnodeBak = 1;
@ -110,13 +110,11 @@ int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION; int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL; int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM; int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsMaxVgroupsPerDb = 0;
#ifdef _TD_ARM_32_ int32_t tsMinTablePerVnode = 100;
int32_t tsMaxTablePerVnode = 100; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
#else int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
#endif
// balance // balance
int32_t tsEnableBalance = 1; int32_t tsEnableBalance = 1;
@ -394,16 +392,6 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "numOfVnodesPerCore";
cfg.ptr = &tsNumOfVnodesPerCore;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 1;
cfg.maxValue = 64;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "numOfTotalVnodes"; cfg.option = "numOfTotalVnodes";
cfg.ptr = &tsNumOfTotalVnodes; cfg.ptr = &tsNumOfTotalVnodes;
cfg.valType = TAOS_CFG_VTYPE_INT16; cfg.valType = TAOS_CFG_VTYPE_INT16;
@ -606,8 +594,18 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "maxVgroupsPerDb";
cfg.ptr = &tsMaxVgroupsPerDb;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 8192;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// database configs // database configs
cfg.option = "maxtablesPerVnode"; cfg.option = "maxTablesPerVnode";
cfg.ptr = &tsMaxTablePerVnode; cfg.ptr = &tsMaxTablePerVnode;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
@ -617,6 +615,26 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "minTablesPerVnode";
cfg.ptr = &tsMinTablePerVnode;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_TABLES;
cfg.maxValue = TSDB_MAX_TABLES;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "tableIncStepPerVnode";
cfg.ptr = &tsTableIncStepPerVnode;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_TABLES;
cfg.maxValue = TSDB_MAX_TABLES;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
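
Once registered here, the new sizing knobs can also be set in taos.cfg. The lines below only sketch the syntax; the values echo the in-code defaults introduced in this commit, and the descriptions are a paraphrase rather than tuning advice:

    # max number of vgroups per db (0: decided by the server)
    # maxVgroupsPerDb      0
    # initial number of table slots in a vnode
    # minTablesPerVnode    100
    # step by which a vnode grows its table slots
    # tableIncStepPerVnode 1000
    # max number of tables per vnode
    # maxTablesPerVnode    1000000
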
cfg.option = "cache"; cfg.option = "cache";
cfg.ptr = &tsCacheBlockSize; cfg.ptr = &tsCacheBlockSize;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -701,8 +719,8 @@ static void doInitGlobalConfig() {
cfg.ptr = &tsReplications; cfg.ptr = &tsReplications;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_REPLICA_NUM; cfg.minValue = TSDB_MIN_DB_REPLICA_OPTION;
cfg.maxValue = TSDB_MAX_REPLICA_NUM; cfg.maxValue = TSDB_MAX_DB_REPLICA_OPTION;
cfg.ptrLength = 0; cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);

View File

@ -698,10 +698,12 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
pStatus->alternativeRole = (uint8_t) tsAlternativeRole; pStatus->alternativeRole = (uint8_t) tsAlternativeRole;
// fill cluster cfg parameters // fill cluster cfg parameters
pStatus->clusterCfg.numOfMnodes = tsNumOfMnodes; pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes);
pStatus->clusterCfg.mnodeEqualVnodeNum = tsMnodeEqualVnodeNum; pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum);
pStatus->clusterCfg.offlineThreshold = tsOfflineThreshold; pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold);
pStatus->clusterCfg.statusInterval = tsStatusInterval; pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval);
pStatus->clusterCfg.maxtablesPerVnode = htonl(tsMaxTablePerVnode);
pStatus->clusterCfg.maxVgroupsPerDb = htonl(tsMaxVgroupsPerDb);
strcpy(pStatus->clusterCfg.arbitrator, tsArbitrator); strcpy(pStatus->clusterCfg.arbitrator, tsArbitrator);
strcpy(pStatus->clusterCfg.timezone, tsTimezone); strcpy(pStatus->clusterCfg.timezone, tsTimezone);
strcpy(pStatus->clusterCfg.locale, tsLocale); strcpy(pStatus->clusterCfg.locale, tsLocale);
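
The status message now sends the 32-bit cluster-config fields in network byte order, so the receiving mnode is expected to apply the matching ntohl. The round trip in isolation (a generic illustration, not the SClusterCfg struct):

    #include <arpa/inet.h>   /* htonl/ntohl */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        int32_t  maxTablesPerVnode = 1000000;                    /* host byte order on the dnode */
        uint32_t onWire   = htonl((uint32_t)maxTablesPerVnode);  /* value placed in the status msg */
        int32_t  received = (int32_t)ntohl(onWire);              /* value the mnode reads back */
        assert(received == maxTablesPerVnode);
        printf("host %d -> wire 0x%08x -> host %d\n", maxTablesPerVnode, onWire, received);
        return 0;
    }
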

View File

@ -60,7 +60,7 @@ int32_t dnodeInitServer() {
rpcInit.label = "DND-S"; rpcInit.label = "DND-S";
rpcInit.numOfThreads = 1; rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessReqMsgFromDnode; rpcInit.cfp = dnodeProcessReqMsgFromDnode;
rpcInit.sessions = 100; rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.connType = TAOS_CONN_SERVER; rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.idleTime = tsShellActivityTimer * 1000;
@ -122,7 +122,7 @@ int32_t dnodeInitClient() {
rpcInit.label = "DND-C"; rpcInit.label = "DND-C";
rpcInit.numOfThreads = 1; rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessRspFromDnode; rpcInit.cfp = dnodeProcessRspFromDnode;
rpcInit.sessions = 100; rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t"; rpcInit.user = "t";

View File

@ -70,7 +70,13 @@ int32_t main(int32_t argc, char *argv[]) {
} }
#endif #endif
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
else if (strcmp(argv[i], "--random-file-fail-factor") == 0) { else if (strcmp(argv[i], "--random-file-fail-output") == 0) {
if ((i < argc - 1) && (argv[i + 1][0] != '-')) {
taosSetRandomFileFailOutput(argv[++i]);
} else {
taosSetRandomFileFailOutput(NULL);
}
} else if (strcmp(argv[i], "--random-file-fail-factor") == 0) {
if ( (i+1) < argc ) { if ( (i+1) < argc ) {
int factor = atoi(argv[i+1]); int factor = atoi(argv[i+1]);
printf("The factor of random failure is %d\n", factor); printf("The factor of random failure is %d\n", factor);

View File

@ -232,9 +232,10 @@ static void *dnodeProcessWriteQueue(void *param) {
pHead->msgType = pWrite->rpcMsg.msgType; pHead->msgType = pWrite->rpcMsg.msgType;
pHead->version = 0; pHead->version = 0;
pHead->len = pWrite->contLen; pHead->len = pWrite->contLen;
dDebug("%p, msg:%s will be processed in vwrite queue", pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType]); dDebug("%p, rpc msg:%s will be processed in vwrite queue", pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType]);
} else { } else {
pHead = (SWalHead *)item; pHead = (SWalHead *)item;
dTrace("%p, wal msg:%s will be processed in vwrite queue, version:%" PRIu64, pHead, taosMsg[pHead->msgType], pHead->version);
} }
int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet); int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet);

View File

@ -274,8 +274,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_DEFAULT_PAYLOAD_SIZE 5120 // default payload size, greater than PATH_MAX value #define TSDB_DEFAULT_PAYLOAD_SIZE 5120 // default payload size, greater than PATH_MAX value
#define TSDB_EXTRA_PAYLOAD_SIZE 128 // extra bytes for auth #define TSDB_EXTRA_PAYLOAD_SIZE 128 // extra bytes for auth
#define TSDB_CQ_SQL_SIZE 1024 #define TSDB_CQ_SQL_SIZE 1024
#define TSDB_MAX_VNODES 256 #define TSDB_MAX_VNODES 2048
#define TSDB_MIN_VNODES 50 #define TSDB_MIN_VNODES 256
#define TSDB_INVALID_VNODE_NUM 0 #define TSDB_INVALID_VNODE_NUM 0
#define TSDB_DNODE_ROLE_ANY 0 #define TSDB_DNODE_ROLE_ANY 0
@ -296,8 +296,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_DEFAULT_TOTAL_BLOCKS 4 #define TSDB_DEFAULT_TOTAL_BLOCKS 4
#define TSDB_MIN_TABLES 4 #define TSDB_MIN_TABLES 4
#define TSDB_MAX_TABLES 200000 #define TSDB_MAX_TABLES 10000000
#define TSDB_DEFAULT_TABLES 1000 #define TSDB_DEFAULT_TABLES 1000000
#define TSDB_TABLES_STEP 1000
#define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650 #define TSDB_MAX_DAYS_PER_FILE 3650
@ -331,9 +332,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MAX_WAL_LEVEL 2 #define TSDB_MAX_WAL_LEVEL 2
#define TSDB_DEFAULT_WAL_LEVEL 1 #define TSDB_DEFAULT_WAL_LEVEL 1
#define TSDB_MIN_REPLICA_NUM 1 #define TSDB_MIN_DB_REPLICA_OPTION 1
#define TSDB_MAX_REPLICA_NUM 3 #define TSDB_MAX_DB_REPLICA_OPTION 3
#define TSDB_DEFAULT_REPLICA_NUM 1 #define TSDB_DEFAULT_DB_REPLICA_OPTION 1
#define TSDB_MAX_JOIN_TABLE_NUM 5 #define TSDB_MAX_JOIN_TABLE_NUM 5
#define TSDB_MAX_UNION_CLAUSE 5 #define TSDB_MAX_UNION_CLAUSE 5

View File

@ -200,6 +200,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "tsdb inval
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, 0, 0x0612, "tsdb create table information")
// query // query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle")
@ -208,6 +209,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NO_DISKSPACE, 0, 0x0702, "query no d
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_OUT_OF_MEMORY, 0, 0x0703, "query out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_OUT_OF_MEMORY, 0, 0x0703, "query out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_APP_ERROR, 0, 0x0704, "query app error") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_APP_ERROR, 0, 0x0704, "query app error")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUP_JOIN_KEY, 0, 0x0705, "query duplicated join key") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUP_JOIN_KEY, 0, 0x0705, "query duplicated join key")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, 0, 0x0706, "query tag condition too many")
// grant // grant
TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "grant expired") TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "grant expired")

View File

@ -569,6 +569,8 @@ typedef struct {
char timezone[64]; // tsTimezone char timezone[64]; // tsTimezone
char locale[TSDB_LOCALE_LEN]; // tsLocale char locale[TSDB_LOCALE_LEN]; // tsLocale
char charset[TSDB_LOCALE_LEN]; // tsCharset char charset[TSDB_LOCALE_LEN]; // tsCharset
int32_t maxtablesPerVnode;
int32_t maxVgroupsPerDb;
} SClusterCfg; } SClusterCfg;
typedef struct { typedef struct {
@ -644,7 +646,7 @@ typedef struct SCMSTableVgroupMsg {
typedef struct { typedef struct {
int32_t vgId; int32_t vgId;
int8_t numOfIps; int8_t numOfIps;
SIpAddr ipAddr[TSDB_MAX_REPLICA_NUM]; SIpAddr ipAddr[TSDB_MAX_REPLICA];
} SCMVgroupInfo; } SCMVgroupInfo;
typedef struct { typedef struct {

View File

@ -136,7 +136,7 @@ static void shellGetDirectoryFileList(char *inputDir)
static void shellSourceFile(TAOS *con, char *fptr) { static void shellSourceFile(TAOS *con, char *fptr) {
wordexp_t full_path; wordexp_t full_path;
int read_len = 0; int read_len = 0;
char * cmd = malloc(MAX_COMMAND_SIZE); char * cmd = malloc(tsMaxSQLStringLen);
size_t cmd_len = 0; size_t cmd_len = 0;
char * line = NULL; char * line = NULL;
size_t line_len = 0; size_t line_len = 0;
@ -185,7 +185,7 @@ static void shellSourceFile(TAOS *con, char *fptr) {
int lineNo = 0; int lineNo = 0;
while ((read_len = getline(&line, &line_len, f)) != -1) { while ((read_len = getline(&line, &line_len, f)) != -1) {
++lineNo; ++lineNo;
if (read_len >= MAX_COMMAND_SIZE) continue; if (read_len >= tsMaxSQLStringLen) continue;
line[--read_len] = '\0'; line[--read_len] = '\0';
if (read_len == 0 || isCommentLine(line)) { // line starts with # if (read_len == 0 || isCommentLine(line)) { // line starts with #

View File

@ -44,10 +44,7 @@ void mnodeRemoveSuperTableFromDb(SDbObj *pDb);
void mnodeAddTableIntoDb(SDbObj *pDb); void mnodeAddTableIntoDb(SDbObj *pDb);
void mnodeRemoveTableFromDb(SDbObj *pDb); void mnodeRemoveTableFromDb(SDbObj *pDb);
void mnodeAddVgroupIntoDb(SVgObj *pVgroup); void mnodeAddVgroupIntoDb(SVgObj *pVgroup);
void mnodeAddVgroupIntoDbTail(SVgObj *pVgroup);
void mnodeRemoveVgroupFromDb(SVgObj *pVgroup); void mnodeRemoveVgroupFromDb(SVgObj *pVgroup);
void mnodeMoveVgroupToTail(SVgObj *pVgroup);
void mnodeMoveVgroupToHead(SVgObj *pVgroup);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -145,10 +145,8 @@ typedef struct SVgObj {
int64_t totalStorage; int64_t totalStorage;
int64_t compStorage; int64_t compStorage;
int64_t pointsWritten; int64_t pointsWritten;
struct SVgObj *prev, *next;
struct SDbObj *pDb; struct SDbObj *pDb;
void * idPool; void * idPool;
SChildTableObj **tableList;
} SVgObj; } SVgObj;
typedef struct { typedef struct {
@ -183,9 +181,11 @@ typedef struct SDbObj {
int32_t numOfVgroups; int32_t numOfVgroups;
int32_t numOfTables; int32_t numOfTables;
int32_t numOfSuperTables; int32_t numOfSuperTables;
SVgObj *pHead; int32_t vgListSize;
SVgObj *pTail; int32_t vgListIndex;
SVgObj **vgList;
struct SAcctObj *pAcct; struct SAcctObj *pAcct;
pthread_mutex_t mutex;
} SDbObj; } SDbObj;
typedef struct SUserObj { typedef struct SUserObj {
@ -246,7 +246,8 @@ typedef struct {
int16_t offset[TSDB_MAX_COLUMNS]; int16_t offset[TSDB_MAX_COLUMNS];
int16_t bytes[TSDB_MAX_COLUMNS]; int16_t bytes[TSDB_MAX_COLUMNS];
int32_t numOfReads; int32_t numOfReads;
int8_t reserved0[2]; int8_t maxReplica;
int8_t reserved0[0];
uint16_t payloadLen; uint16_t payloadLen;
char payload[]; char payload[];
} SShowObj; } SShowObj;
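
In SDbObj above, the intrusive vgroup list (pHead/pTail here, prev/next in SVgObj) gives way to a mutex-protected growable pointer array (vgList, vgListSize, vgListIndex). A condensed sketch of appending under such a layout, purely illustrative; the actual growth and locking strategy in the mnode code may differ:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct demo_vgobj demo_vgobj_t;   /* stand-in for SVgObj */

    typedef struct {
        int32_t          vgListSize;    /* allocated slots */
        int32_t          numOfVgroups;  /* used slots */
        demo_vgobj_t   **vgList;        /* growable array instead of pHead/pTail */
        pthread_mutex_t  mutex;
    } demo_db_t;

    /* append a vgroup, growing the array when it is full */
    static int demo_add_vgroup(demo_db_t *db, demo_vgobj_t *vg) {
        pthread_mutex_lock(&db->mutex);
        if (db->numOfVgroups >= db->vgListSize) {
            int32_t newSize = (db->vgListSize > 0) ? db->vgListSize * 2 : 8;
            demo_vgobj_t **tmp = realloc(db->vgList, newSize * sizeof(*tmp));
            if (tmp == NULL) {
                pthread_mutex_unlock(&db->mutex);
                return -1;
            }
            db->vgList = tmp;
            db->vgListSize = newSize;
        }
        db->vgList[db->numOfVgroups++] = vg;
        pthread_mutex_unlock(&db->mutex);
        return 0;
    }

    int main(void) {
        demo_db_t db = { .vgListSize = 0, .numOfVgroups = 0, .vgList = NULL };
        pthread_mutex_init(&db.mutex, NULL);
        return demo_add_vgroup(&db, NULL);   /* NULL stands in for a real vgroup object */
    }
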

View File

@ -40,6 +40,7 @@ char* mnodeGetDnodeStatusStr(int32_t dnodeStatus);
void mgmtMonitorDnodeModule(); void mgmtMonitorDnodeModule();
int32_t mnodeGetDnodesNum(); int32_t mnodeGetDnodesNum();
int32_t mnodeGetOnlinDnodesCpuCoreNum();
int32_t mnodeGetOnlinDnodesNum(); int32_t mnodeGetOnlinDnodesNum();
void * mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode); void * mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode);
void mnodeIncDnodeRef(SDnodeObj *pDnode); void mnodeIncDnodeRef(SDnodeObj *pDnode);

View File

@ -94,6 +94,7 @@ void sdbDecRef(void *thandle, void *pRow);
int64_t sdbGetNumOfRows(void *handle); int64_t sdbGetNumOfRows(void *handle);
int32_t sdbGetId(void *handle); int32_t sdbGetId(void *handle);
uint64_t sdbGetVersion(); uint64_t sdbGetVersion();
bool sdbCheckRowDeleted(void *thandle, void *pRow);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -30,17 +30,17 @@ void mnodeDecVgroupRef(SVgObj *pVgroup);
void mnodeDropAllDbVgroups(SDbObj *pDropDb); void mnodeDropAllDbVgroups(SDbObj *pDropDb);
void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb); void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb);
void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode); void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode);
void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb); //void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
void * mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup); void * mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup);
void mnodeUpdateVgroup(SVgObj *pVgroup); void mnodeUpdateVgroup(SVgObj *pVgroup);
void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload); void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload);
void mnodeCheckUnCreatedVgroup(SDnodeObj *pDnode, SVnodeLoad *pVloads, int32_t openVnodes); void mnodeCheckUnCreatedVgroup(SDnodeObj *pDnode, SVnodeLoad *pVloads, int32_t openVnodes);
int32_t mnodeCreateVgroup(struct SMnodeMsg *pMsg, SDbObj *pDb); int32_t mnodeCreateVgroup(struct SMnodeMsg *pMsg);
void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle); void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle);
void mnodeAlterVgroup(SVgObj *pVgroup, void *ahandle); void mnodeAlterVgroup(SVgObj *pVgroup, void *ahandle);
SVgObj *mnodeGetAvailableVgroup(SDbObj *pDb); int32_t mnodeGetAvailableVgroup(struct SMnodeMsg *pMsg, SVgObj **pVgroup, int32_t *sid);
void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable); void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable); void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);

View File

@ -38,6 +38,7 @@
#include "mnodeUser.h" #include "mnodeUser.h"
#include "mnodeVgroup.h" #include "mnodeVgroup.h"
#define VG_LIST_SIZE 8
static void * tsDbSdb = NULL; static void * tsDbSdb = NULL;
static int32_t tsDbUpdateSize; static int32_t tsDbUpdateSize;
@ -50,8 +51,14 @@ static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg);
static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg);
static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg);
static void mnodeDestroyDb(SDbObj *pDb) {
pthread_mutex_destroy(&pDb->mutex);
tfree(pDb->vgList);
tfree(pDb);
}
static int32_t mnodeDbActionDestroy(SSdbOper *pOper) { static int32_t mnodeDbActionDestroy(SSdbOper *pOper) {
tfree(pOper->pObj); mnodeDestroyDb(pOper->pObj);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -59,8 +66,9 @@ static int32_t mnodeDbActionInsert(SSdbOper *pOper) {
SDbObj *pDb = pOper->pObj; SDbObj *pDb = pOper->pObj;
SAcctObj *pAcct = mnodeGetAcct(pDb->acct); SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
pDb->pHead = NULL; pthread_mutex_init(&pDb->mutex, NULL);
pDb->pTail = NULL; pDb->vgListSize = VG_LIST_SIZE;
pDb->vgList = calloc(pDb->vgListSize, sizeof(SVgObj *));
pDb->numOfVgroups = 0; pDb->numOfVgroups = 0;
pDb->numOfTables = 0; pDb->numOfTables = 0;
pDb->numOfSuperTables = 0; pDb->numOfSuperTables = 0;
@ -94,14 +102,15 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
} }
static int32_t mnodeDbActionUpdate(SSdbOper *pOper) { static int32_t mnodeDbActionUpdate(SSdbOper *pOper) {
SDbObj *pDb = pOper->pObj; SDbObj *pNew = pOper->pObj;
SDbObj *pSaved = mnodeGetDb(pDb->name); SDbObj *pDb = mnodeGetDb(pNew->name);
if (pDb != pSaved) { if (pDb != NULL && pNew != pDb) {
memcpy(pSaved, pDb, pOper->rowSize); memcpy(pDb, pNew, pOper->rowSize);
free(pDb); free(pNew->vgList);
free(pNew);
} }
mnodeUpdateAllDbVgroups(pSaved); //mnodeUpdateAllDbVgroups(pDb);
mnodeDecDbRef(pSaved); mnodeDecDbRef(pDb);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -278,9 +287,9 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
if (pCfg->replications < TSDB_MIN_REPLICA_NUM || pCfg->replications > TSDB_MAX_REPLICA_NUM) { if (pCfg->replications < TSDB_MIN_DB_REPLICA_OPTION || pCfg->replications > TSDB_MAX_DB_REPLICA_OPTION) {
mError("invalid db option replications:%d valid range: [%d, %d]", pCfg->replications, TSDB_MIN_REPLICA_NUM, mError("invalid db option replications:%d valid range: [%d, %d]", pCfg->replications, TSDB_MIN_DB_REPLICA_OPTION,
TSDB_MAX_REPLICA_NUM); TSDB_MAX_DB_REPLICA_OPTION);
return TSDB_CODE_MND_INVALID_DB_OPTION; return TSDB_CODE_MND_INVALID_DB_OPTION;
} }
@ -384,7 +393,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
code = sdbInsertRow(&oper); code = sdbInsertRow(&oper);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tfree(pDb); mnodeDestroyDb(pDb);
mLInfo("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code)); mLInfo("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code));
return code; return code;
} else { } else {
@ -421,45 +430,33 @@ void mnodePrintVgroups(SDbObj *pDb, char *oper) {
void mnodeAddVgroupIntoDb(SVgObj *pVgroup) { void mnodeAddVgroupIntoDb(SVgObj *pVgroup) {
SDbObj *pDb = pVgroup->pDb; SDbObj *pDb = pVgroup->pDb;
pVgroup->next = pDb->pHead; pthread_mutex_lock(&pDb->mutex);
pVgroup->prev = NULL; int32_t vgPos = pDb->numOfVgroups++;
if (vgPos >= pDb->vgListSize) {
pDb->vgList = realloc(pDb->vgList, pDb->vgListSize * 2 * sizeof(SVgObj *));
memset(pDb->vgList + pDb->vgListSize, 0, pDb->vgListSize * sizeof(SVgObj *));
pDb->vgListSize *= 2;
}
if (pDb->pHead) pDb->pHead->prev = pVgroup; pDb->vgList[vgPos] = pVgroup;
if (pDb->pTail == NULL) pDb->pTail = pVgroup; pthread_mutex_unlock(&pDb->mutex);
pDb->pHead = pVgroup;
pDb->numOfVgroups++;
}
void mnodeAddVgroupIntoDbTail(SVgObj *pVgroup) {
SDbObj *pDb = pVgroup->pDb;
pVgroup->next = NULL;
pVgroup->prev = pDb->pTail;
if (pDb->pTail) pDb->pTail->next = pVgroup;
if (pDb->pHead == NULL) pDb->pHead = pVgroup;
pDb->pTail = pVgroup;
pDb->numOfVgroups++;
} }
void mnodeRemoveVgroupFromDb(SVgObj *pVgroup) { void mnodeRemoveVgroupFromDb(SVgObj *pVgroup) {
SDbObj *pDb = pVgroup->pDb; SDbObj *pDb = pVgroup->pDb;
if (pVgroup->prev) pVgroup->prev->next = pVgroup->next;
if (pVgroup->next) pVgroup->next->prev = pVgroup->prev; pthread_mutex_lock(&pDb->mutex);
if (pVgroup->prev == NULL) pDb->pHead = pVgroup->next; for (int32_t v1 = 0; v1 < pDb->numOfVgroups; ++v1) {
if (pVgroup->next == NULL) pDb->pTail = pVgroup->prev; if (pDb->vgList[v1] == pVgroup) {
for (int32_t v2 = v1; v2 < pDb->numOfVgroups - 1; ++v2) {
pDb->vgList[v2] = pDb->vgList[v2 + 1];
}
pDb->numOfVgroups--; pDb->numOfVgroups--;
} break;
}
}
void mnodeMoveVgroupToTail(SVgObj *pVgroup) { pthread_mutex_unlock(&pDb->mutex);
mnodeRemoveVgroupFromDb(pVgroup);
mnodeAddVgroupIntoDbTail(pVgroup);
}
void mnodeMoveVgroupToHead(SVgObj *pVgroup) {
mnodeRemoveVgroupFromDb(pVgroup);
mnodeAddVgroupIntoDb(pVgroup);
} }
void mnodeCleanupDbs() { void mnodeCleanupDbs() {
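For reference, a minimal standalone sketch of the pattern the two functions above now use: a mutex-protected pointer array that doubles its capacity when full and compacts itself on removal. The names and the unchecked realloc are illustrative only; they are not the real mnode structures.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  void          **items;   /* object pointers, grown on demand */
  int32_t         size;    /* allocated slots */
  int32_t         count;   /* used slots */
  pthread_mutex_t mutex;
} PtrList;

static void ptrListAdd(PtrList *list, void *item) {
  pthread_mutex_lock(&list->mutex);
  if (list->count >= list->size) {
    /* double the capacity and zero the new half, as mnodeAddVgroupIntoDb does */
    list->items = realloc(list->items, list->size * 2 * sizeof(void *));
    memset(list->items + list->size, 0, list->size * sizeof(void *));
    list->size *= 2;
  }
  list->items[list->count++] = item;
  pthread_mutex_unlock(&list->mutex);
}

static void ptrListRemove(PtrList *list, void *item) {
  pthread_mutex_lock(&list->mutex);
  for (int32_t i = 0; i < list->count; ++i) {
    if (list->items[i] != item) continue;
    /* shift the tail left one slot so the array stays compact */
    for (int32_t j = i; j < list->count - 1; ++j) {
      list->items[j] = list->items[j + 1];
    }
    list->count--;
    break;
  }
  pthread_mutex_unlock(&list->mutex);
}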
@ -531,11 +528,6 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
#ifndef __CLOUD_VERSION__ #ifndef __CLOUD_VERSION__
if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) { if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) {
#endif #endif
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "maxtables");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4; pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT; pSchema[cols].type = TSDB_DATA_TYPE_INT;
@ -561,12 +553,6 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
pSchema[cols].bytes = htons(pShow->bytes[cols]); pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++; cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "ctime(Sec.)");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 1; pShow->bytes[cols] = 1;
pSchema[cols].type = TSDB_DATA_TYPE_TINYINT; pSchema[cols].type = TSDB_DATA_TYPE_TINYINT;
strcpy(pSchema[cols].name, "wallevel"); strcpy(pSchema[cols].name, "wallevel");
@ -676,10 +662,6 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
#ifndef __CLOUD_VERSION__ #ifndef __CLOUD_VERSION__
if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) { if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) {
#endif #endif
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pDb->cfg.maxTables; // table num can be created should minus 1
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pDb->cfg.cacheBlockSize; *(int32_t *)pWrite = pDb->cfg.cacheBlockSize;
cols++; cols++;
@ -696,10 +678,6 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
*(int32_t *)pWrite = pDb->cfg.maxRowsPerFileBlock; *(int32_t *)pWrite = pDb->cfg.maxRowsPerFileBlock;
cols++; cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pDb->cfg.commitTime;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int8_t *)pWrite = pDb->cfg.walLevel; *(int8_t *)pWrite = pDb->cfg.walLevel;
cols++; cols++;

View File

@ -69,6 +69,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
SDnodeObj *pDnode = pOper->pObj; SDnodeObj *pDnode = pOper->pObj;
if (pDnode->status != TAOS_DN_STATUS_DROPPING) { if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE; pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->lastAccess = tsAccessSquence;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -187,7 +188,27 @@ int32_t mnodeGetDnodesNum() {
return sdbGetNumOfRows(tsDnodeSdb); return sdbGetNumOfRows(tsDnodeSdb);
} }
int32_t mnodeGetOnlinDnodesNum(char *ep) { int32_t mnodeGetOnlinDnodesCpuCoreNum() {
SDnodeObj *pDnode = NULL;
void * pIter = NULL;
int32_t cpuCores = 0;
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (pDnode->status != TAOS_DN_STATUS_OFFLINE) {
cpuCores += pDnode->numOfCores;
}
mnodeDecDnodeRef(pDnode);
}
sdbFreeIter(pIter);
if (cpuCores < 2) cpuCores = 2;
return cpuCores;
}
int32_t mnodeGetOnlinDnodesNum() {
SDnodeObj *pDnode = NULL; SDnodeObj *pDnode = NULL;
void * pIter = NULL; void * pIter = NULL;
int32_t onlineDnodes = 0; int32_t onlineDnodes = 0;
@ -283,10 +304,12 @@ static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
} }
static bool mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) { static bool mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
if (clusterCfg->numOfMnodes != tsNumOfMnodes) return false; if (clusterCfg->numOfMnodes != htonl(tsNumOfMnodes)) return false;
if (clusterCfg->mnodeEqualVnodeNum != tsMnodeEqualVnodeNum) return false; if (clusterCfg->mnodeEqualVnodeNum != htonl(tsMnodeEqualVnodeNum)) return false;
if (clusterCfg->offlineThreshold != tsOfflineThreshold) return false; if (clusterCfg->offlineThreshold != htonl(tsOfflineThreshold)) return false;
if (clusterCfg->statusInterval != tsStatusInterval) return false; if (clusterCfg->statusInterval != htonl(tsStatusInterval)) return false;
if (clusterCfg->maxtablesPerVnode != htonl(tsMaxTablePerVnode)) return false;
if (clusterCfg->maxVgroupsPerDb != htonl(tsMaxVgroupsPerDb)) return false;
if (0 != strncasecmp(clusterCfg->arbitrator, tsArbitrator, strlen(tsArbitrator))) return false; if (0 != strncasecmp(clusterCfg->arbitrator, tsArbitrator, strlen(tsArbitrator))) return false;
if (0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) return false; if (0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) return false;
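The added htonl() calls matter because the fields in SClusterCfg arrive in network byte order while the ts* globals are in host order; comparing them raw would only succeed on big-endian machines. A tiny self-contained illustration of the difference (the value 3 is made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint32_t localMnodes = 3;            /* host byte order, like tsNumOfMnodes */
  uint32_t wireMnodes  = htonl(3);     /* network byte order, as received in SClusterCfg */

  /* on a little-endian host the raw comparison reports a bogus mismatch */
  printf("raw compare:   %s\n", wireMnodes == localMnodes        ? "equal" : "different");
  /* converting the local value first makes the check byte-order safe */
  printf("htonl compare: %s\n", wireMnodes == htonl(localMnodes) ? "equal" : "different");
  return 0;
}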
@ -376,7 +399,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
if (false == ret) { if (false == ret) {
mnodeDecDnodeRef(pDnode); mnodeDecDnodeRef(pDnode);
rpcFreeCont(pRsp); rpcFreeCont(pRsp);
mError("dnode %s cluster cfg parameters inconsistent", pStatus->dnodeEp); mError("dnode:%d, %s cluster cfg parameters inconsistent", pDnode->dnodeId, pStatus->dnodeEp);
return TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT; return TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT;
} }
@ -468,18 +491,22 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
return TSDB_CODE_MND_DNODE_NOT_EXIST; return TSDB_CODE_MND_DNODE_NOT_EXIST;
} }
mnodeDecDnodeRef(pDnode);
if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) { if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) {
mError("dnode:%d, can't drop dnode:%s which is master", pDnode->dnodeId, ep); mError("dnode:%d, can't drop dnode:%s which is master", pDnode->dnodeId, ep);
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_MND_NO_REMOVE_MASTER; return TSDB_CODE_MND_NO_REMOVE_MASTER;
} }
mInfo("dnode:%d, start to drop it", pDnode->dnodeId); mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
#ifndef _SYNC #ifndef _SYNC
return mnodeDropDnode(pDnode, pMsg); int32_t code = mnodeDropDnode(pDnode, pMsg);
#else #else
return balanceDropDnode(pDnode); int32_t code = balanceDropDnode(pDnode);
#endif #endif
mnodeDecDnodeRef(pDnode);
return code;
} }
static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg) {

View File

@ -115,7 +115,7 @@ SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po
uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs(); uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs();
SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, &connId, sizeof(int32_t), expireTime); SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, &connId, sizeof(int32_t), expireTime);
if (pConn == NULL) { if (pConn == NULL) {
mError("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); mDebug("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
return NULL; return NULL;
} }

View File

@ -264,8 +264,12 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
if (pOper->cb != NULL) { if (pOper->cb != NULL) {
pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode); pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode);
} }
dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode); dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode);
// if ahandle, means this func is called by sdb write
if (ahandle == NULL) {
sdbDecRef(pOper->table, pOper->pObj);
}
taosFreeQitem(pOper); taosFreeQitem(pOper);
} }
@ -389,7 +393,7 @@ void sdbCleanUp() {
} }
void sdbIncRef(void *handle, void *pObj) { void sdbIncRef(void *handle, void *pObj) {
if (pObj == NULL) return; if (pObj == NULL || handle == NULL) return;
SSdbTable *pTable = handle; SSdbTable *pTable = handle;
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
@ -398,7 +402,7 @@ void sdbIncRef(void *handle, void *pObj) {
} }
void sdbDecRef(void *handle, void *pObj) { void sdbDecRef(void *handle, void *pObj) {
if (pObj == NULL) return; if (pObj == NULL || handle == NULL) return;
SSdbTable *pTable = handle; SSdbTable *pTable = handle;
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
@ -581,16 +585,26 @@ static int sdbWrite(void *param, void *data, int type) {
return sdbInsertHash(pTable, &oper); return sdbInsertHash(pTable, &oper);
} else if (action == SDB_ACTION_DELETE) { } else if (action == SDB_ACTION_DELETE) {
SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont); SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont);
assert(rowMeta != NULL && rowMeta->row != NULL); if (rowMeta == NULL || rowMeta->row == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName,
pHead->cont);
return TSDB_CODE_SUCCESS;
}
SSdbOper oper = {.table = pTable, .pObj = rowMeta->row}; SSdbOper oper = {.table = pTable, .pObj = rowMeta->row};
return sdbDeleteHash(pTable, &oper); return sdbDeleteHash(pTable, &oper);
} else if (action == SDB_ACTION_UPDATE) { } else if (action == SDB_ACTION_UPDATE) {
SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont); SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont);
assert(rowMeta != NULL && rowMeta->row != NULL); if (rowMeta == NULL || rowMeta->row == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName,
pHead->cont);
return TSDB_CODE_SUCCESS;
}
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
code = (*pTable->decodeFp)(&oper); code = (*pTable->decodeFp)(&oper);
return sdbUpdateHash(pTable, &oper); return sdbUpdateHash(pTable, &oper);
} else { return TSDB_CODE_MND_INVALID_MSG_TYPE; } } else {
return TSDB_CODE_MND_INVALID_MSG_TYPE;
}
} }
int32_t sdbInsertRow(SSdbOper *pOper) { int32_t sdbInsertRow(SSdbOper *pOper) {
@ -647,6 +661,14 @@ int32_t sdbInsertRow(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
bool sdbCheckRowDeleted(void *pTableInput, void *pRow) {
SSdbTable *pTable = pTableInput;
if (pTable == NULL) return false;
int8_t *updateEnd = pRow + pTable->refCountPos - 1;
return (*updateEnd == 1);
}
int32_t sdbDeleteRow(SSdbOper *pOper) { int32_t sdbDeleteRow(SSdbOper *pOper) {
SSdbTable *pTable = (SSdbTable *)pOper->table; SSdbTable *pTable = (SSdbTable *)pOper->table;
if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE;
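The sdbCheckRowDeleted() added above assumes every sdb row keeps a one-byte deletion mark in the byte immediately before its reference counter (refCountPos). A rough sketch of that layout with illustrative names, not the real object definitions:

#include <stdbool.h>
#include <stdint.h>

/* illustrative layout: the real mnode objects place these fields at refCountPos */
typedef struct {
  char    body[64];     /* object payload */
  int8_t  updateEnd;    /* set to 1 once the row has been removed from the hash */
  int32_t refCount;     /* counter driven by sdbIncRef/sdbDecRef */
} DemoRow;

/* equivalent of sdbCheckRowDeleted(): peek at the byte just before the refCount */
static bool demoCheckRowDeleted(const DemoRow *pRow) {
  return pRow->updateEnd == 1;
}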
@ -663,14 +685,18 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
return TSDB_CODE_MND_SDB_INVAID_META_ROW; return TSDB_CODE_MND_SDB_INVAID_META_ROW;
} }
sdbIncRef(pTable, pOper->pObj);
int32_t code = sdbDeleteHash(pTable, pOper); int32_t code = sdbDeleteHash(pTable, pOper);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
sdbError("table:%s, failed to delete from hash", pTable->tableName); sdbError("table:%s, failed to delete from hash", pTable->tableName);
sdbDecRef(pTable, pOper->pObj);
return code; return code;
} }
// just delete data from memory // just delete data from memory
if (pOper->type != SDB_OPER_GLOBAL) { if (pOper->type != SDB_OPER_GLOBAL) {
sdbDecRef(pTable, pOper->pObj);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -692,7 +718,6 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
} }
sdbIncRef(pNewOper->table, pNewOper->pObj);
taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper); taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
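Taken together, the reference-count moves in this file pin the row for the whole life of the queued delete: sdbDeleteRow takes the reference up front, and the matching release happens in sdbConfirmForward once the forward/WAL path is done. A minimal sketch of that pin/unpin pairing, with purely illustrative names:

#include <stdint.h>

typedef struct { int32_t refCount; } DemoRow;

static void demoIncRef(DemoRow *row) { ++row->refCount; }
static void demoDecRef(DemoRow *row) { --row->refCount; /* free when it reaches zero */ }

/* producer side: pin the row before removing it from the hash and queuing the op */
static int32_t demoDeleteRow(DemoRow *row) {
  demoIncRef(row);
  /* ... delete from the in-memory hash; on failure demoDecRef(row) and bail out ... */
  /* ... enqueue the write/forward operation that still refers to row ... */
  return 0;
}

/* consumer side: runs after the write is confirmed and releases the pin */
static void demoConfirmForward(DemoRow *row) {
  /* ... send the response to the client ... */
  demoDecRef(row);
}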
@ -968,11 +993,12 @@ static void *sdbWorkerFp(void *param) {
int32_t code = sdbWrite(pOper, pHead, type); int32_t code = sdbWrite(pOper, pHead, type);
if (code > 0) code = 0; if (code > 0) code = 0;
if (pOper) if (pOper) {
pOper->retCode = code; pOper->retCode = code;
else } else {
pHead->len = code; // hackway pHead->len = code; // hackway
} }
}
walFsync(tsSdbObj.wal); walFsync(tsSdbObj.wal);
@ -983,7 +1009,6 @@ static void *sdbWorkerFp(void *param) {
if (type == TAOS_QTYPE_RPC) { if (type == TAOS_QTYPE_RPC) {
pOper = (SSdbOper *)item; pOper = (SSdbOper *)item;
sdbDecRef(pOper->table, pOper->pObj);
sdbConfirmForward(NULL, pOper, pOper->retCode); sdbConfirmForward(NULL, pOper, pOper->retCode);
} else if (type == TAOS_QTYPE_FWD) { } else if (type == TAOS_QTYPE_FWD) {
pHead = (SWalHead *)item; pHead = (SWalHead *)item;

View File

@ -72,7 +72,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessDropTableMsg(SMnodeMsg *mnodeMsg);
static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg);
static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg); static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn);
static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg); static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *mnodeMsg);
@ -319,15 +319,6 @@ static int32_t mnodeChildTableActionRestored() {
continue; continue;
} }
if (pVgroup->tableList == NULL) {
mError("ctable:%s, vgId:%d tableList is null", pTable->info.tableId, pTable->vgId);
pTable->vgId = 0;
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mnodeDecTableRef(pTable);
continue;
}
if (pTable->info.type == TSDB_CHILD_TABLE) { if (pTable->info.type == TSDB_CHILD_TABLE) {
SSuperTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid); SSuperTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid);
if (pSuperTable == NULL) { if (pSuperTable == NULL) {
@ -385,7 +376,7 @@ static void mnodeCleanupChildTables() {
} }
static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) {
pStable->numOfTables++; atomic_add_fetch_32(&pStable->numOfTables, 1);
if (pStable->vgHash == NULL) { if (pStable->vgHash == NULL) {
pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
@ -394,18 +385,22 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt
if (pStable->vgHash != NULL) { if (pStable->vgHash != NULL) {
if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) { if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) {
taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId)); taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
mDebug("table:%s, vgId:%d is put into stable vgList, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId,
(int32_t)taosHashGetSize(pStable->vgHash));
} }
} }
} }
static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable) {
pStable->numOfTables--; atomic_sub_fetch_32(&pStable->numOfTables, 1);
if (pStable->vgHash == NULL) return; if (pStable->vgHash == NULL) return;
SVgObj *pVgroup = mnodeGetVgroup(pCtable->vgId); SVgObj *pVgroup = mnodeGetVgroup(pCtable->vgId);
if (pVgroup == NULL) { if (pVgroup == NULL) {
taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId)); taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId));
mDebug("table:%s, vgId:%d is remove from stable vgList, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId,
(int32_t)taosHashGetSize(pStable->vgHash));
} }
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
} }
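The numOfTables counter on the super table is now updated with atomic_add_fetch_32/atomic_sub_fetch_32 instead of plain ++/--, so concurrent child-table creation and removal cannot lose increments. Assuming the taos atomic macros wrap the usual GCC/Clang builtins, the same idea in isolation looks like:

#include <stdint.h>

static int32_t demoNumOfTables = 0;

/* the __sync builtins are full read-modify-write operations; plain ++/-- is a race */
static void demoAddTable(void)    { __sync_add_and_fetch(&demoNumOfTables, 1); }
static void demoRemoveTable(void) { __sync_sub_and_fetch(&demoNumOfTables, 1); }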
@ -757,11 +752,15 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
} }
if (pMsg->pTable->type == TSDB_SUPER_TABLE) { if (pMsg->pTable->type == TSDB_SUPER_TABLE) {
mInfo("app:%p:%p, table:%s, start to drop stable", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); SSuperTableObj *pSTable = (SSuperTableObj *)pMsg->pTable;
mInfo("app:%p:%p, table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d",
pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId, pSTable->uid, pSTable->numOfTables, (int32_t)taosHashGetSize(pSTable->vgHash));
return mnodeProcessDropSuperTableMsg(pMsg); return mnodeProcessDropSuperTableMsg(pMsg);
} else { } else {
mInfo("app:%p:%p, table:%s, start to drop ctable", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); SChildTableObj *pCTable = (SChildTableObj *)pMsg->pTable;
return mnodeProcessDropChildTableMsg(pMsg); mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pCTable->vgId, pCTable->sid, pCTable->uid);
return mnodeProcessDropChildTableMsg(pMsg, true);
} }
} }
@ -808,7 +807,7 @@ static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
assert(pTable); assert(pTable);
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
mLInfo("stable:%s, is created in sdb", pTable->info.tableId); mLInfo("stable:%s, is created in sdb, uid:%" PRIu64, pTable->info.tableId, pTable->uid);
} else { } else {
mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
tstrerror(code)); tstrerror(code));
@ -896,7 +895,7 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
if (pStable->numOfTables != 0) { if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) {
SHashMutableIterator *pIter = taosHashCreateIter(pStable->vgHash); SHashMutableIterator *pIter = taosHashCreateIter(pStable->vgHash);
while (taosHashIterNext(pIter)) { while (taosHashIterNext(pIter)) {
int32_t *pVgId = taosHashIterGet(pIter); int32_t *pVgId = taosHashIterGet(pIter);
@ -1600,8 +1599,8 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
assert(pTable); assert(pTable);
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, mDebug("app:%p:%p, table:%s, created in mnode, vgId:%d sid:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle,
pTable->sid, pTable->uid); pMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid, tstrerror(code));
} else { } else {
mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg, mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
@ -1730,27 +1729,25 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
return code; return code;
} }
SVgObj *pVgroup = mnodeGetAvailableVgroup(pMsg->pDb);
if (pVgroup == NULL) {
mDebug("app:%p:%p, table:%s, start to create a new vgroup", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId);
return mnodeCreateVgroup(pMsg, pMsg->pDb);
}
if (pMsg->retry == 0) { if (pMsg->retry == 0) {
if (pMsg->pTable == NULL) { if (pMsg->pTable == NULL) {
int32_t sid = taosAllocateId(pVgroup->idPool); SVgObj *pVgroup = NULL;
if (sid <= 0) { int32_t sid = 0;
mDebug("app:%p:%p, table:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &sid);
pVgroup->vgId); if (code != TSDB_CODE_SUCCESS) {
return mnodeCreateVgroup(pMsg, pMsg->pDb); mDebug("app:%p:%p, table:%s, failed to get available vgroup, reason:%s", pMsg->rpcMsg.ahandle, pMsg,
pCreate->tableId, tstrerror(code));
return code;
}
if (pMsg->pVgroup != NULL) {
mnodeDecVgroupRef(pMsg->pVgroup);
} }
if (pMsg->pVgroup == NULL) {
pMsg->pVgroup = pVgroup; pMsg->pVgroup = pVgroup;
mnodeIncVgroupRef(pVgroup); mnodeIncVgroupRef(pVgroup);
}
mDebug("app:%p:%p, table:%s, create table in vgroup, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, mDebug("app:%p:%p, table:%s, allocated in vgroup, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId,
pVgroup->vgId, sid); pVgroup->vgId, sid);
return mnodeDoCreateChildTable(pMsg, sid); return mnodeDoCreateChildTable(pMsg, sid);
@ -1769,7 +1766,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
} }
} }
static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId); if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
if (pMsg->pVgroup == NULL) { if (pMsg->pVgroup == NULL) {
@ -1793,7 +1790,9 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup); SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
mInfo("app:%p:%p, table:%s, send drop ctable msg", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); mInfo("app:%p:%p, table:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pTable->vgId, pTable->sid, pTable->uid);
SRpcMsg rpcMsg = { SRpcMsg rpcMsg = {
.ahandle = pMsg, .ahandle = pMsg,
.pCont = pDrop, .pCont = pDrop,
@ -1802,6 +1801,8 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
.msgType = TSDB_MSG_TYPE_MD_DROP_TABLE .msgType = TSDB_MSG_TYPE_MD_DROP_TABLE
}; };
if (!needReturn) rpcMsg.ahandle = NULL;
dnodeSendMsgToDnode(&ipSet, &rpcMsg); dnodeSendMsgToDnode(&ipSet, &rpcMsg);
return TSDB_CODE_MND_ACTION_IN_PROGRESS; return TSDB_CODE_MND_ACTION_IN_PROGRESS;
@ -2125,7 +2126,7 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) {
int32_t numOfTables = 0; int32_t numOfTables = 0;
SChildTableObj *pTable = NULL; SChildTableObj *pTable = NULL;
mInfo("stable:%s, all child tables(%d) will dropped from sdb", pStable->info.tableId, numOfTables); mInfo("stable:%s, all child tables:%d will dropped from sdb", pStable->info.tableId, pStable->numOfTables);
while (1) { while (1) {
pIter = mnodeGetNextChildTable(pIter, &pTable); pIter = mnodeGetNextChildTable(pIter, &pTable);
@ -2149,6 +2150,7 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) {
mInfo("stable:%s, all child tables:%d is dropped from sdb", pStable->info.tableId, numOfTables); mInfo("stable:%s, all child tables:%d is dropped from sdb", pStable->info.tableId, numOfTables);
} }
#if 0
static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) { static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
SVgObj *pVgroup = mnodeGetVgroup(vnode); SVgObj *pVgroup = mnodeGetVgroup(vnode);
if (pVgroup == NULL) return NULL; if (pVgroup == NULL) return NULL;
@ -2159,8 +2161,11 @@ static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
return pTable; return pTable;
} }
#endif
static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
#if 0
SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont; SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont;
pCfg->dnodeId = htonl(pCfg->dnodeId); pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->vgId = htonl(pCfg->vgId); pCfg->vgId = htonl(pCfg->vgId);
@ -2184,6 +2189,7 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
pMsg->rpcRsp.rsp = pCreate; pMsg->rpcRsp.rsp = pCreate;
pMsg->rpcRsp.len = htonl(pCreate->contLen); pMsg->rpcRsp.len = htonl(pCreate->contLen);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
#endif
} }
// handle drop child response // handle drop child response
@ -2195,12 +2201,15 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
assert(pTable); assert(pTable);
mInfo("app:%p:%p, table:%s, drop table rsp received, thandle:%p result:%s", mnodeMsg->rpcMsg.ahandle, mnodeMsg,
pTable->info.tableId, mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code)); mInfo("app:%p:%p, table:%s, drop table rsp received, vgId:%d sid:%d uid:%" PRIu64 ", thandle:%p result:%s",
mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid,
mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code));
if (rpcMsg->code != TSDB_CODE_SUCCESS) { if (rpcMsg->code != TSDB_CODE_SUCCESS) {
mError("app:%p:%p, table:%s, failed to drop in dnode, reason:%s", mnodeMsg->rpcMsg.ahandle, mnodeMsg, mError("app:%p:%p, table:%s, failed to drop in dnode, vgId:%d sid:%d uid:%" PRIu64 ", reason:%s",
pTable->info.tableId, tstrerror(rpcMsg->code)); mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid,
tstrerror(rpcMsg->code));
dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code);
return; return;
} }
@ -2247,6 +2256,14 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
assert(pTable); assert(pTable);
// If the table is deleted by another thread during creation, stop creating and send drop msg to vnode
if (sdbCheckRowDeleted(tsChildTableSdb, pTable)) {
mDebug("app:%p:%p, table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64,
mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid);
mnodeProcessDropChildTableMsg(mnodeMsg, false);
rpcMsg->code = TSDB_CODE_SUCCESS;
}
if (rpcMsg->code == TSDB_CODE_SUCCESS || rpcMsg->code == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { if (rpcMsg->code == TSDB_CODE_SUCCESS || rpcMsg->code == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
SCMCreateTableMsg *pCreate = mnodeMsg->rpcMsg.pCont; SCMCreateTableMsg *pCreate = mnodeMsg->rpcMsg.pCont;
if (pCreate->getMeta) { if (pCreate->getMeta) {

View File

@ -46,6 +46,7 @@ typedef enum {
static void *tsVgroupSdb = NULL; static void *tsVgroupSdb = NULL;
static int32_t tsVgUpdateSize = 0; static int32_t tsVgUpdateSize = 0;
static int32_t mnodeAllocVgroupIdPool(SVgObj *pInputVgroup);
static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn); static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg); static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
@ -53,17 +54,17 @@ static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ; static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ;
static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle); static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) { static void mnodeDestroyVgroup(SVgObj *pVgroup) {
SVgObj *pVgroup = pOper->pObj;
if (pVgroup->idPool) { if (pVgroup->idPool) {
taosIdPoolCleanUp(pVgroup->idPool); taosIdPoolCleanUp(pVgroup->idPool);
pVgroup->idPool = NULL; pVgroup->idPool = NULL;
} }
if (pVgroup->tableList) {
tfree(pVgroup->tableList);
}
tfree(pOper->pObj); tfree(pVgroup);
}
static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) {
mnodeDestroyVgroup(pOper->pObj);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -82,21 +83,9 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) {
} }
pVgroup->pDb = pDb; pVgroup->pDb = pDb;
pVgroup->prev = NULL;
pVgroup->next = NULL;
pVgroup->accessState = TSDB_VN_ALL_ACCCESS; pVgroup->accessState = TSDB_VN_ALL_ACCCESS;
if (mnodeAllocVgroupIdPool(pVgroup) < 0) {
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables; mError("vgId:%d, failed to init idpool for vgroups", pVgroup->vgId);
pVgroup->tableList = calloc(pDb->cfg.maxTables, sizeof(SChildTableObj *));
if (pVgroup->tableList == NULL) {
mError("vgId:%d, failed to malloc(size:%d) for the tableList of vgroups", pVgroup->vgId, size);
return -1;
}
pVgroup->idPool = taosInitIdPool(pDb->cfg.maxTables);
if (pVgroup->idPool == NULL) {
mError("vgId:%d, failed to taosInitIdPool for vgroups", pVgroup->vgId);
tfree(pVgroup->tableList);
return -1; return -1;
} }
@ -134,20 +123,6 @@ static int32_t mnodeVgroupActionDelete(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static void mnodeVgroupUpdateIdPool(SVgObj *pVgroup) {
int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
SDbObj *pDb = pVgroup->pDb;
if (pDb != NULL) {
if (pDb->cfg.maxTables != oldTables) {
mInfo("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
memset(pVgroup->tableList + oldTables, 0, (pDb->cfg.maxTables - oldTables) * sizeof(SChildTableObj *));
}
}
}
static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
SVgObj *pNew = pOper->pObj; SVgObj *pNew = pOper->pObj;
SVgObj *pVgroup = mnodeGetVgroup(pNew->vgId); SVgObj *pVgroup = mnodeGetVgroup(pNew->vgId);
@ -174,7 +149,6 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
free(pNew); free(pNew);
} }
mnodeVgroupUpdateIdPool(pVgroup);
// reset vgid status on vgroup changed // reset vgid status on vgroup changed
mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId); mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId);
@ -344,8 +318,132 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
} }
} }
SVgObj *mnodeGetAvailableVgroup(SDbObj *pDb) { static int32_t mnodeAllocVgroupIdPool(SVgObj *pInputVgroup) {
return pDb->pHead; SDbObj *pDb = pInputVgroup->pDb;
if (pDb == NULL) return TSDB_CODE_MND_APP_ERROR;
int32_t minIdPoolSize = TSDB_MAX_TABLES;
int32_t maxIdPoolSize = tsMinTablePerVnode;
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
SVgObj *pVgroup = pDb->vgList[v];
if (pVgroup == NULL) continue;
int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
minIdPoolSize = MIN(minIdPoolSize, idPoolSize);
maxIdPoolSize = MAX(maxIdPoolSize, idPoolSize);
}
// new vgroup
if (pInputVgroup->idPool == NULL) {
pInputVgroup->idPool = taosInitIdPool(maxIdPoolSize);
if (pInputVgroup->idPool == NULL) {
mError("vgId:%d, failed to init idPool for vgroup, size:%d", pInputVgroup->vgId, maxIdPoolSize);
return TSDB_CODE_MND_OUT_OF_MEMORY;
} else {
mDebug("vgId:%d, init idPool for vgroup, size:%d", pInputVgroup->vgId, maxIdPoolSize);
return TSDB_CODE_SUCCESS;
}
}
// realloc all vgroups in db
int32_t newIdPoolSize;
if (minIdPoolSize * 4 < tsTableIncStepPerVnode) {
newIdPoolSize = minIdPoolSize * 4;
} else {
newIdPoolSize = ((minIdPoolSize / tsTableIncStepPerVnode) + 1) * tsTableIncStepPerVnode;
}
if (newIdPoolSize > tsMaxTablePerVnode) {
if (minIdPoolSize >= tsMaxTablePerVnode) {
mError("db:%s, minIdPoolSize:%d newIdPoolSize:%d larger than maxTablesPerVnode:%d", pDb->name, minIdPoolSize, newIdPoolSize,
tsMaxTablePerVnode);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
} else {
newIdPoolSize = tsMaxTablePerVnode;
}
}
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
SVgObj *pVgroup = pDb->vgList[v];
if (pVgroup == NULL) continue;
int32_t oldIdPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
if (newIdPoolSize == oldIdPoolSize) continue;
if (taosUpdateIdPool(pVgroup->idPool, newIdPoolSize) < 0) {
mError("vgId:%d, failed to update idPoolSize from %d to %d", pVgroup->vgId, oldIdPoolSize, newIdPoolSize);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
} else {
mDebug("vgId:%d, idPoolSize update from %d to %d", pVgroup->vgId, oldIdPoolSize, newIdPoolSize);
}
}
return TSDB_CODE_SUCCESS;
}
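The resize policy above grows the per-vgroup table-id pools in two regimes: quadruple while the smallest pool is still below tsTableIncStepPerVnode, then round up to the next multiple of that step, clamped at tsMaxTablePerVnode. Just the size computation, as a standalone sketch with illustrative parameter names:

#include <stdint.h>

/* returns the next pool size, or -1 when the per-vnode cap is already reached */
static int32_t demoNextIdPoolSize(int32_t minSize, int32_t incStep, int32_t maxPerVnode) {
  int32_t newSize;
  if (minSize * 4 < incStep) {
    newSize = minSize * 4;                            /* small pools grow geometrically */
  } else {
    newSize = ((minSize / incStep) + 1) * incStep;    /* then in fixed steps */
  }

  if (newSize > maxPerVnode) {
    if (minSize >= maxPerVnode) return -1;            /* nothing left to grow */
    newSize = maxPerVnode;                            /* clamp to the cap */
  }
  return newSize;
}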
int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSid) {
SDbObj *pDb = pMsg->pDb;
pthread_mutex_lock(&pDb->mutex);
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
int vgIndex = (v + pDb->vgListIndex) % pDb->numOfVgroups;
SVgObj *pVgroup = pDb->vgList[vgIndex];
if (pVgroup == NULL) {
mError("db:%s, index:%d vgroup is null", pDb->name, vgIndex);
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_MND_APP_ERROR;
}
int32_t sid = taosAllocateId(pVgroup->idPool);
if (sid <= 0) {
mDebug("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId);
continue;
}
*pSid = sid;
*ppVgroup = pVgroup;
pDb->vgListIndex = vgIndex;
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_SUCCESS;
}
int maxVgroupsPerDb = tsMaxVgroupsPerDb;
if (maxVgroupsPerDb <= 0) {
maxVgroupsPerDb = mnodeGetOnlinDnodesCpuCoreNum();
maxVgroupsPerDb = MAX(maxVgroupsPerDb, 2);
}
if (pDb->numOfVgroups < maxVgroupsPerDb) {
mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle, pMsg,
pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
pthread_mutex_unlock(&pDb->mutex);
int32_t code = mnodeCreateVgroup(pMsg);
if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return code;
}
SVgObj *pVgroup = pDb->vgList[0];
if (pVgroup == NULL) return TSDB_CODE_MND_NO_ENOUGH_DNODES;
int32_t code = mnodeAllocVgroupIdPool(pVgroup);
if (code != TSDB_CODE_SUCCESS) {
pthread_mutex_unlock(&pDb->mutex);
return code;
}
int32_t sid = taosAllocateId(pVgroup->idPool);
if (sid <= 0) {
mError("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId);
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
}
*pSid = sid;
*ppVgroup = pVgroup;
pDb->vgListIndex = 0;
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_SUCCESS;
} }
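mnodeGetAvailableVgroup() now scans the vgroup array in a circle, starting at the index that last yielded a table id, and only creates a new vgroup (or grows the first pool) when every existing pool is full. The index arithmetic in isolation, with illustrative names:

#include <stdint.h>

/* circular scan beginning at lastIndex; returns the chosen index or -1 if all are full */
static int32_t demoPickVgroup(int32_t numOfVgroups, int32_t lastIndex,
                              int32_t (*tryAllocId)(int32_t vgIndex)) {
  for (int32_t v = 0; v < numOfVgroups; ++v) {
    int32_t vgIndex = (v + lastIndex) % numOfVgroups;
    if (tryAllocId(vgIndex) > 0) {
      return vgIndex;     /* caller remembers vgIndex as the next starting point */
    }
  }
  return -1;              /* every pool is exhausted */
}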
void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) { void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
@ -373,13 +471,16 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
} }
pMsg->expected = pVgroup->numOfVnodes; pMsg->expected = pVgroup->numOfVnodes;
pMsg->successed = 0;
pMsg->received = 0;
mnodeSendCreateVgroupMsg(pVgroup, pMsg); mnodeSendCreateVgroupMsg(pVgroup, pMsg);
return TSDB_CODE_MND_ACTION_IN_PROGRESS; return TSDB_CODE_MND_ACTION_IN_PROGRESS;
} }
int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) { int32_t mnodeCreateVgroup(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
SDbObj *pDb = pMsg->pDb;
SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj)); SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj));
tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN); tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN);
@ -407,7 +508,7 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
int32_t code = sdbInsertRow(&oper); int32_t code = sdbInsertRow(&oper);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
pMsg->pVgroup = NULL; pMsg->pVgroup = NULL;
tfree(pVgroup); mnodeDestroyVgroup(pVgroup);
} else { } else {
code = TSDB_CODE_MND_ACTION_IN_PROGRESS; code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
} }
@ -461,29 +562,27 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
pSchema[cols].bytes = htons(pShow->bytes[cols]); pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++; cols++;
int32_t maxReplica = 0; pShow->bytes[cols] = 4;
SVgObj *pVgroup = NULL; pSchema[cols].type = TSDB_DATA_TYPE_INT;
STableObj *pTable = NULL; strcpy(pSchema[cols].name, "poolSize");
if (pShow->payloadLen > 0 ) { pSchema[cols].bytes = htons(pShow->bytes[cols]);
pTable = mnodeGetTable(pShow->payload); cols++;
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
mnodeDecTableRef(pTable); pShow->bytes[cols] = 4;
return TSDB_CODE_MND_INVALID_TABLE_NAME; pSchema[cols].type = TSDB_DATA_TYPE_INT;
} strcpy(pSchema[cols].name, "maxTables");
mnodeDecTableRef(pTable); pSchema[cols].bytes = htons(pShow->bytes[cols]);
pVgroup = mnodeGetVgroup(((SChildTableObj*)pTable)->vgId); cols++;
if (NULL == pVgroup) return TSDB_CODE_MND_INVALID_TABLE_NAME;
mnodeDecVgroupRef(pVgroup); pShow->maxReplica = 1;
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
} else { SVgObj *pVgroup = pDb->vgList[v];
SVgObj *pVgroup = pDb->pHead; if (pVgroup != NULL) {
while (pVgroup != NULL) { pShow->maxReplica = pVgroup->numOfVnodes > pShow->maxReplica ? pVgroup->numOfVnodes : pShow->maxReplica;
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
pVgroup = pVgroup->next;
} }
} }
for (int32_t i = 0; i < maxReplica; ++i) { for (int32_t i = 0; i < pShow->maxReplica; ++i) {
pShow->bytes[cols] = 2; pShow->bytes[cols] = 2;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
strcpy(pSchema[cols].name, "dnode"); strcpy(pSchema[cols].name, "dnode");
@ -507,27 +606,33 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
pShow->numOfColumns = cols; pShow->numOfColumns = cols;
pShow->offset[0] = 0; pShow->offset[0] = 0;
for (int32_t i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; for (int32_t i = 1; i < cols; ++i) {
pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
if (NULL == pTable) {
pShow->numOfRows = pDb->numOfVgroups;
pShow->pIter = pDb->pHead;
} else {
pShow->numOfRows = 1;
pShow->pIter = pVgroup;
} }
mnodeDecDbRef(pDb); pShow->numOfRows = pDb->numOfVgroups;
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
mnodeDecDbRef(pDb);
return 0; return 0;
} }
static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) {
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
return true;
}
SChildTableObj *pCTable = (SChildTableObj *)pTable;
if (pVgroup->vgId == pCTable->vgId) {
return true;
} else {
return false;
}
}
int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) { int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
int32_t numOfRows = 0; int32_t numOfRows = 0;
SVgObj *pVgroup = NULL; SVgObj *pVgroup = NULL;
int32_t maxReplica = 0;
int32_t cols = 0; int32_t cols = 0;
char * pWrite; char * pWrite;
@ -539,16 +644,16 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
return 0; return 0;
} }
pVgroup = pDb->pHead; STableObj *pTable = NULL;
while (pVgroup != NULL) { if (pShow->payloadLen > 0 ) {
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; pTable = mnodeGetTable(pShow->payload);
pVgroup = pVgroup->next;
} }
while (numOfRows < rows) { while (numOfRows < rows) {
pVgroup = (SVgObj *) pShow->pIter; pShow->pIter = mnodeGetNextVgroup(pShow->pIter, &pVgroup);
if (pVgroup == NULL) break; if (pVgroup == NULL) break;
pShow->pIter = (void *) pVgroup->next; if (pVgroup->pDb != pDb) continue;
if (!mnodeFilterVgroups(pVgroup, pTable)) continue;
cols = 0; cols = 0;
@ -560,7 +665,15 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
*(int32_t *) pWrite = pVgroup->numOfTables; *(int32_t *) pWrite = pVgroup->numOfTables;
cols++; cols++;
for (int32_t i = 0; i < maxReplica; ++i) { pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = taosIdPoolMaxSize(pVgroup->idPool);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = tsMaxTablePerVnode;
cols++;
for (int32_t i = 0; i < pShow->maxReplica; ++i) {
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId; *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
cols++; cols++;
@ -588,38 +701,36 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
} }
} }
mnodeDecVgroupRef(pVgroup);
numOfRows++; numOfRows++;
} }
pShow->numOfReads += numOfRows; pShow->numOfReads += numOfRows;
mnodeDecTableRef(pTable);
mnodeDecDbRef(pDb); mnodeDecDbRef(pDb);
return numOfRows; return numOfRows;
} }
void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1 && pVgroup->tableList[pTable->sid - 1] == NULL) { int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
pVgroup->tableList[pTable->sid - 1] = pTable; if (pTable->sid > idPoolSize) {
mnodeAllocVgroupIdPool(pVgroup);
}
if (pTable->sid >= 1) {
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid); taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables++; pVgroup->numOfTables++;
}
if (pVgroup->numOfTables >= pVgroup->pDb->cfg.maxTables) {
mnodeMoveVgroupToTail(pVgroup);
}
mnodeIncVgroupRef(pVgroup); mnodeIncVgroupRef(pVgroup);
}
} }
void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1 && pVgroup->tableList[pTable->sid - 1] != NULL) { if (pTable->sid >= 1) {
pVgroup->tableList[pTable->sid - 1] = NULL;
taosFreeId(pVgroup->idPool, pTable->sid); taosFreeId(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables--; pVgroup->numOfTables--;
}
mnodeMoveVgroupToHead(pVgroup);
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
}
} }
SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) { SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
@ -630,13 +741,16 @@ SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
if (pVnode == NULL) return NULL; if (pVnode == NULL) return NULL;
strcpy(pVnode->db, pVgroup->dbName); strcpy(pVnode->db, pVgroup->dbName);
int32_t maxTables = taosIdPoolMaxSize(pVgroup->idPool);
//TODO: dynamic alloc tables in tsdb
maxTables = MAX(10000, tsMaxTablePerVnode);
SMDVnodeCfg *pCfg = &pVnode->cfg; SMDVnodeCfg *pCfg = &pVnode->cfg;
pCfg->vgId = htonl(pVgroup->vgId); pCfg->vgId = htonl(pVgroup->vgId);
pCfg->cfgVersion = htonl(pDb->cfgVersion); pCfg->cfgVersion = htonl(pDb->cfgVersion);
pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize); pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize);
pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks); pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks);
pCfg->maxTables = htonl(pDb->cfg.maxTables + 1); pCfg->maxTables = htonl(maxTables + 1);
pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile); pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile);
pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep); pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep);
pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1); pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1);
@ -724,6 +838,8 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
pVgroup->vgId, tstrerror(rpcMsg->code), mnodeMsg->received, mnodeMsg->successed, mnodeMsg->expected, pVgroup->vgId, tstrerror(rpcMsg->code), mnodeMsg->received, mnodeMsg->successed, mnodeMsg->expected,
mnodeMsg->rpcMsg.handle, rpcMsg->ahandle); mnodeMsg->rpcMsg.handle, rpcMsg->ahandle);
assert(mnodeMsg->received <= mnodeMsg->expected);
if (mnodeMsg->received != mnodeMsg->expected) return; if (mnodeMsg->received != mnodeMsg->expected) return;
if (mnodeMsg->received == mnodeMsg->successed) { if (mnodeMsg->received == mnodeMsg->successed) {
@ -857,9 +973,10 @@ void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
sdbFreeIter(pIter); sdbFreeIter(pIter);
mInfo("dnode:%d, all vgroups is dropped from sdb", pDropDnode->dnodeId); mInfo("dnode:%d, all vgroups:%d is dropped from sdb", pDropDnode->dnodeId, numOfVgroups);
} }
#if 0
void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) { void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
void * pIter = NULL; void * pIter = NULL;
SVgObj *pVgroup = NULL; SVgObj *pVgroup = NULL;
@ -881,6 +998,7 @@ void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
mInfo("db:%s, all vgroups is updated in sdb", pAlterDb->name); mInfo("db:%s, all vgroups is updated in sdb", pAlterDb->name);
} }
#endif
void mnodeDropAllDbVgroups(SDbObj *pDropDb) { void mnodeDropAllDbVgroups(SDbObj *pDropDb) {
void * pIter = NULL; void * pIter = NULL;

View File

@ -61,6 +61,9 @@
#define HTTP_CHECK_BODY_CONTINUE 0 #define HTTP_CHECK_BODY_CONTINUE 0
#define HTTP_CHECK_BODY_SUCCESS 1 #define HTTP_CHECK_BODY_SUCCESS 1
#define HTTP_READ_DATA_SUCCESS 0
#define HTTP_READ_DATA_FAILED 1
#define HTTP_WRITE_RETRY_TIMES 500 #define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5 #define HTTP_WRITE_WAIT_TIME_MS 5
#define HTTP_EXPIRED_TIME 60000 #define HTTP_EXPIRED_TIME 60000

View File

@ -23,6 +23,6 @@ void httpCleanUpConnect();
void *httpInitServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle); void *httpInitServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle);
void httpCleanUpServer(HttpServer *pServer); void httpCleanUpServer(HttpServer *pServer);
bool httpReadDataImp(HttpContext *pContext); int httpReadDataImp(HttpContext *pContext);
#endif #endif

View File

@ -137,7 +137,7 @@ void httpReleaseContext(HttpContext *pContext) {
assert(refCount >= 0); assert(refCount >= 0);
HttpContext **ppContext = pContext->ppContext; HttpContext **ppContext = pContext->ppContext;
httpDebug("context:%p, is releasd, data:%p refCount:%d", pContext, ppContext, refCount); httpDebug("context:%p, is released, data:%p refCount:%d", pContext, ppContext, refCount);
if (tsHttpServer.contextCache != NULL) { if (tsHttpServer.contextCache != NULL) {
taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false); taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);

View File

@ -60,6 +60,7 @@ bool httpParseURL(HttpContext* pContext) {
char* pSeek; char* pSeek;
char* pEnd = strchr(pParser->pLast, ' '); char* pEnd = strchr(pParser->pLast, ' ');
if (pEnd == NULL) { if (pEnd == NULL) {
httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL);
return false; return false;
} }
@ -275,14 +276,14 @@ bool httpParseChunkedBody(HttpContext* pContext, HttpParser* pParser, bool test)
return true; return true;
} }
bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { int httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) {
bool parsedOk = httpParseChunkedBody(pContext, pParser, true); bool parsedOk = httpParseChunkedBody(pContext, pParser, true);
if (parsedOk) { if (parsedOk) {
httpParseChunkedBody(pContext, pParser, false); httpParseChunkedBody(pContext, pParser, false);
return HTTP_CHECK_BODY_SUCCESS; return HTTP_CHECK_BODY_SUCCESS;
} else { } else {
httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
if (!httpReadDataImp(pContext)) { if (httpReadDataImp(pContext) != HTTP_READ_DATA_SUCCESS) {
httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr);
return HTTP_CHECK_BODY_ERROR; return HTTP_CHECK_BODY_ERROR;
} else { } else {
@ -296,7 +297,6 @@ int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) {
if (dataReadLen > pParser->data.len) { if (dataReadLen > pParser->data.len) {
httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, read size:%d dataReadLen:%d > pContext->data.len:%d", httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, read size:%d dataReadLen:%d > pContext->data.len:%d",
pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len);
httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR);
return HTTP_CHECK_BODY_ERROR; return HTTP_CHECK_BODY_ERROR;
} else if (dataReadLen < pParser->data.len) { } else if (dataReadLen < pParser->data.len) {
httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read", httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
@ -358,20 +358,13 @@ bool httpParseRequest(HttpContext* pContext) {
} }
int httpCheckReadCompleted(HttpContext* pContext) { int httpCheckReadCompleted(HttpContext* pContext) {
HttpParser *pParser = &pContext->parser; HttpParser* pParser = &pContext->parser;
if (pContext->httpChunked == HTTP_UNCUNKED) {
int ret = httpReadUnChunkedBody(pContext, pParser);
if (ret != HTTP_CHECK_BODY_SUCCESS) {
return ret;
}
} else {
int ret = httpReadChunkedBody(pContext, pParser);
if (ret != HTTP_CHECK_BODY_SUCCESS) {
return ret;
}
}
return HTTP_CHECK_BODY_SUCCESS; if (pContext->httpChunked == HTTP_UNCUNKED) {
return httpReadUnChunkedBody(pContext, pParser);
} else {
return httpReadChunkedBody(pContext, pParser);
}
} }
bool httpDecodeRequest(HttpContext* pContext) { bool httpDecodeRequest(HttpContext* pContext) {

View File

@ -69,7 +69,7 @@ void httpCleanUpConnect() {
httpDebug("http server:%s is cleaned up", pServer->label); httpDebug("http server:%s is cleaned up", pServer->label);
} }
bool httpReadDataImp(HttpContext *pContext) { int httpReadDataImp(HttpContext *pContext) {
HttpParser *pParser = &pContext->parser; HttpParser *pParser = &pContext->parser;
while (pParser->bufsize <= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) { while (pParser->bufsize <= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) {
@ -85,8 +85,7 @@ bool httpReadDataImp(HttpContext *pContext) {
} else { } else {
httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect", httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect",
pContext, pContext->fd, pContext->ipstr, errno); pContext, pContext->fd, pContext->ipstr, errno);
httpReleaseContext(pContext); return HTTP_READ_DATA_FAILED;
return false;
} }
} else { } else {
pParser->bufsize += nread; pParser->bufsize += nread;
@ -95,15 +94,13 @@ bool httpReadDataImp(HttpContext *pContext) {
if (pParser->bufsize >= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) { if (pParser->bufsize >= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) {
httpError("context:%p, fd:%d, ip:%s, thread:%s, request big than:%d", httpError("context:%p, fd:%d, ip:%s, thread:%s, request big than:%d",
pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, HTTP_BUFFER_SIZE); pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, HTTP_BUFFER_SIZE);
httpSendErrorResp(pContext, HTTP_REQUSET_TOO_BIG); return HTTP_REQUSET_TOO_BIG;
httpNotifyContextClose(pContext);
return false;
} }
} }
pParser->buffer[pParser->bufsize] = 0; pParser->buffer[pParser->bufsize] = 0;
return true; return HTTP_READ_DATA_SUCCESS;
} }
static bool httpDecompressData(HttpContext *pContext) { static bool httpDecompressData(HttpContext *pContext) {
@ -141,8 +138,14 @@ static bool httpReadData(HttpContext *pContext) {
httpInitContext(pContext); httpInitContext(pContext);
} }
if (!httpReadDataImp(pContext)) { int32_t code = httpReadDataImp(pContext);
if (code != HTTP_READ_DATA_SUCCESS) {
if (code == HTTP_READ_DATA_FAILED) {
httpReleaseContext(pContext);
} else {
httpSendErrorResp(pContext, code);
httpNotifyContextClose(pContext); httpNotifyContextClose(pContext);
}
return false; return false;
} }
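httpReadDataImp now classifies the failure instead of handling it, and httpReadData maps the returned code to the right reaction: release the context quietly on a socket error, or send an HTTP error response and schedule a close for anything else (such as an oversized request). A hedged sketch of that split, with invented names standing in for the real context and response helpers:

#include <stdio.h>

/* Illustrative codes only; the module defines HTTP_READ_DATA_SUCCESS,
 * HTTP_READ_DATA_FAILED and the error-response codes elsewhere. */
enum { READ_OK = 0, READ_SOCKET_FAILED = 1, READ_TOO_BIG = 2 };

typedef struct { int fd; } Conn;

/* The low-level reader only reports what happened; it no longer decides
 * whether to answer the peer or tear the connection down. */
static int readData(Conn *c, int simulate) { (void)c; return simulate; }

static void releaseConn(Conn *c)                 { printf("fd %d: released quietly\n", c->fd); }
static void sendErrorAndClose(Conn *c, int code) { printf("fd %d: error %d sent, closing\n", c->fd, code); }

/* The caller owns the policy: a socket error is dropped silently, every other
 * failure gets an error response before the connection is closed. */
static int handleRead(Conn *c, int simulate) {
  int code = readData(c, simulate);
  if (code == READ_OK) return 0;
  if (code == READ_SOCKET_FAILED) {
    releaseConn(c);
  } else {
    sendErrorAndClose(c, code);
  }
  return -1;
}

int main(void) {
  Conn a = { .fd = 3 }, b = { .fd = 4 };
  handleRead(&a, READ_SOCKET_FAILED);
  handleRead(&b, READ_TOO_BIG);
  return 0;
}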

View File

@ -47,6 +47,10 @@ void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numO
} }
} }
if (tscResultsetFetchCompleted(result)) {
isContinue = false;
}
if (isContinue) { if (isContinue) {
// retrieve next batch of rows // retrieve next batch of rows
httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s", httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s",
@ -75,7 +79,8 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
HttpContext *pContext = (HttpContext *)param; HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return; if (pContext == NULL) return;
HttpSqlCmds * multiCmds = pContext->multiCmds; code = taos_errno(result);
HttpSqlCmds *multiCmds = pContext->multiCmds;
HttpEncodeMethod *encode = pContext->encodeMethod; HttpEncodeMethod *encode = pContext->encodeMethod;
HttpSqlCmd *singleCmd = multiCmds->cmds + multiCmds->pos; HttpSqlCmd *singleCmd = multiCmds->cmds + multiCmds->pos;
@ -109,8 +114,8 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
return; return;
} }
int num_fields = taos_field_count(result); bool isUpdate = tscIsUpdateQuery(result);
if (num_fields == 0) { if (isUpdate) {
// not select or show commands // not select or show commands
int affectRows = taos_affected_rows(result); int affectRows = taos_affected_rows(result);
httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, affect rows:%d, sql:%s", httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, affect rows:%d, sql:%s",
@ -238,6 +243,7 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode)
if (pContext == NULL) return; if (pContext == NULL) return;
int32_t code = taos_errno(result); int32_t code = taos_errno(result);
HttpEncodeMethod *encode = pContext->encodeMethod; HttpEncodeMethod *encode = pContext->encodeMethod;
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {

View File

@ -121,6 +121,7 @@ typedef struct SQueryCostInfo {
uint32_t loadBlockStatis; uint32_t loadBlockStatis;
uint32_t discardBlocks; uint32_t discardBlocks;
uint64_t elapsedTime; uint64_t elapsedTime;
uint64_t ioTime;
uint64_t computTime; uint64_t computTime;
} SQueryCostInfo; } SQueryCostInfo;

View File

@ -37,7 +37,7 @@ SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot);
#define curTimeWindow(_winres) ((_winres)->curIndex) #define curTimeWindow(_winres) ((_winres)->curIndex)
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot); bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize); int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize);
char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult); char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult);

View File

@ -125,7 +125,8 @@ typedef struct SArithmeticSupport {
} SArithmeticSupport; } SArithmeticSupport;
typedef struct SQLPreAggVal { typedef struct SQLPreAggVal {
bool isSet; bool isSet; // statistics info set or not
bool dataBlockLoaded; // data block is loaded or not
SDataStatis statis; SDataStatis statis;
} SQLPreAggVal; } SQLPreAggVal;
@ -224,25 +225,14 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
#define IS_SINGLEOUTPUT(x) (((x)&TSDB_FUNCSTATE_SO) != 0) #define IS_SINGLEOUTPUT(x) (((x)&TSDB_FUNCSTATE_SO) != 0)
#define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0) #define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0)
/*
* the status of one block, used in metric query. all blocks are mixed together,
* we need the status to decide if one block is a first/end/inter block of one meter
*/
enum {
BLK_FILE_BLOCK = 0x1,
BLK_BLOCK_LOADED = 0x2,
BLK_CACHE_BLOCK = 0x4, // in case of cache block, block must be loaded
};
/* determine the real data need to calculated the result */ /* determine the real data need to calculated the result */
enum { enum {
BLK_DATA_NO_NEEDED = 0x0, BLK_DATA_NO_NEEDED = 0x0,
BLK_DATA_STATIS_NEEDED = 0x1, BLK_DATA_STATIS_NEEDED = 0x1,
BLK_DATA_ALL_NEEDED = 0x3, BLK_DATA_ALL_NEEDED = 0x3,
BLK_DATA_DISCARD = 0x4, // discard current data block since it is not qualified for filter
}; };
#define SET_DATA_BLOCK_NOT_LOADED(x) ((x) &= (~BLK_BLOCK_LOADED));
typedef struct STwaInfo { typedef struct STwaInfo {
TSKEY lastKey; TSKEY lastKey;
int8_t hasResult; // flag to denote has value int8_t hasResult; // flag to denote has value
@ -264,12 +254,9 @@ typedef struct STwaInfo {
/* global sql function array */ /* global sql function array */
extern struct SQLAggFuncElem aAggs[]; extern struct SQLAggFuncElem aAggs[];
/* compatible check array list */ extern int32_t functionCompatList[]; // compatible check array list
extern int32_t funcCompatDefList[];
bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval); bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval);
bool stableQueryFunctChanged(int32_t funcId);
void resetResultInfo(SResultInfo *pResInfo); void resetResultInfo(SResultInfo *pResInfo);
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf); void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf);
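The surviving BLK_DATA_* values are used as a bitmask: each output function reports how much of a data block it needs, the flags are OR-ed together, and only the combined value decides between skipping the block, loading just its statistics, or loading all rows (BLK_DATA_DISCARD is produced later, when the pre-filter rejects a block). A small sketch of that combination step, assuming a per-function requirement callback similar in spirit to aAggs[functionId].dataReqFunc:

#include <stdio.h>

/* Same flag layout as the enum above: STATIS is bit 0, ALL sets bits 0 and 1. */
enum {
  DATA_NO_NEEDED     = 0x0,
  DATA_STATIS_NEEDED = 0x1,
  DATA_ALL_NEEDED    = 0x3,
  DATA_DISCARD       = 0x4
};

/* Stand-in for the per-function requirement callback: count() and min() are
 * satisfied by statistics, a plain column projection needs every row. */
static unsigned requirementOf(int functionId) {
  switch (functionId) {
    case 0:  return DATA_STATIS_NEEDED;  /* count */
    case 1:  return DATA_STATIS_NEEDED;  /* min   */
    default: return DATA_ALL_NEEDED;     /* plain column output */
  }
}

int main(void) {
  int funcs[] = { 0, 1 };                 /* only aggregates: statistics suffice */
  unsigned status = DATA_NO_NEEDED;
  for (int i = 0; i < 2; ++i) status |= requirementOf(funcs[i]);

  if (status == DATA_NO_NEEDED)           printf("skip block entirely\n");
  else if (status == DATA_STATIS_NEEDED)  printf("load block statistics only\n");
  else                                    printf("load the full block\n");
  return 0;
}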

View File

@ -1358,6 +1358,8 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
pCtx->preAggVals.isSet = false; pCtx->preAggVals.isSet = false;
} }
pCtx->preAggVals.dataBlockLoaded = (inputData != NULL);
// limit/offset query will affect this value // limit/offset query will affect this value
pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos:0; pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos:0;
pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1; pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1;
@ -1928,73 +1930,46 @@ char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWi
pQuery->pSelectExpr[columnIndex].bytes * realRowId; pQuery->pSelectExpr[columnIndex].bytes * realRowId;
} }
/** #define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR)
* decrease the refcount for each table involved in this query
* @param pQInfo
*/
UNUSED_FUNC void vnodeDecMeterRefcnt(SQInfo *pQInfo) {
if (pQInfo != NULL) {
// assert(taosHashGetSize(pQInfo->tableqinfoGroupInfo) >= 1);
}
#if 0 static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx,
if (pQInfo == NULL || pQInfo->tableqinfoGroupInfo.numOfTables == 1) { int32_t numOfRows) {
atomic_fetch_sub_32(&pQInfo->pObj->numOfQueries, 1); SQuery* pQuery = pRuntimeEnv->pQuery;
qDebug("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pQInfo->pObj->vnode, if (pDataStatis == NULL || (pQuery->numOfFilterCols == 0 && (!pRuntimeEnv->topBotQuery))) {
pQInfo->pObj->sid, pQInfo->pObj->meterId, pQInfo->pObj->numOfQueries);
} else {
int32_t num = 0;
for (int32_t i = 0; i < pQInfo->tableqinfoGroupInfo.numOfTables; ++i) {
SMeterObj *pMeter = getMeterObj(pQInfo->tableqinfoGroupInfo, pQInfo->pSidSet->pTableIdList[i]->sid);
atomic_fetch_sub_32(&(pMeter->numOfQueries), 1);
if (pMeter->numOfQueries > 0) {
qDebug("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pMeter->vnode, pMeter->sid,
pMeter->meterId, pMeter->numOfQueries);
num++;
}
}
/*
* in order to reduce log output, for all meters of which numOfQueries count are 0,
* we do not output corresponding information
*/
num = pQInfo->tableqinfoGroupInfo.numOfTables - num;
qDebug("QInfo:%p metric query is over, dec query ref for %d meters, numOfQueries on %d meters are 0", pQInfo,
pQInfo->tableqinfoGroupInfo.numOfTables, num);
}
#endif
}
static bool needToLoadDataBlock(SQuery *pQuery, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx,
int32_t numOfTotalPoints) {
if (pDataStatis == NULL) {
return true; return true;
} }
#if 0
for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) {
SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k];
int32_t colIndex = pFilterInfo->info.colIndex;
// this column not valid in current data block int32_t index = -1;
if (colIndex < 0 || pDataStatis[colIndex].colId != pFilterInfo->info.data.colId) { for(int32_t i = 0; i < pQuery->numOfCols; ++i) {
continue; if (pDataStatis[i].colId == pFilterInfo->info.colId) {
index = i;
break;
}
}
// no statistics data
if (index == -1) {
return true;
} }
// not support pre-filter operation on binary/nchar data type // not support pre-filter operation on binary/nchar data type
if (!vnodeSupportPrefilter(pFilterInfo->info.data.type)) { if (!IS_PREFILTER_TYPE(pFilterInfo->info.type)) {
continue; return true;
} }
// all points in current column are NULL, no need to check its boundary value // all points in current column are NULL, no need to check its boundary value
if (pDataStatis[colIndex].numOfNull == numOfTotalPoints) { if (pDataStatis[index].numOfNull == numOfRows) {
continue; continue;
} }
if (pFilterInfo->info.info.type == TSDB_DATA_TYPE_FLOAT) { SDataStatis* pDataBlockst = &pDataStatis[index];
float minval = *(double *)(&pDataStatis[colIndex].min);
float maxval = *(double *)(&pDataStatis[colIndex].max); if (pFilterInfo->info.type == TSDB_DATA_TYPE_FLOAT) {
float minval = *(double *)(&pDataBlockst->min);
float maxval = *(double *)(&pDataBlockst->max);
for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) { for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) {
if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&minval, (char *)&maxval)) { if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&minval, (char *)&maxval)) {
@ -2003,53 +1978,50 @@ static bool needToLoadDataBlock(SQuery *pQuery, SDataStatis *pDataStatis, SQLFun
} }
} else { } else {
for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) { for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) {
if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&pDataStatis[colIndex].min, if (pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&pDataBlockst->min, (char *)&pDataBlockst->max)) {
(char *)&pDataStatis[colIndex].max)) {
return true; return true;
} }
} }
} }
} }
// todo disable this opt code block temporarily if (pRuntimeEnv->topBotQuery) {
// for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
// int32_t functId = pQuery->pSelectExpr[i].base.functionId; int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
// if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM) { if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
// return top_bot_datablock_filter(&pCtx[i], functId, (char *)&pField[i].min, (char *)&pField[i].max); return topbot_datablock_filter(&pCtx[i], functionId, (char *)&pDataStatis[i].min, (char *)&pDataStatis[i].max);
// } }
// } }
}
#endif return false;
return true;
} }
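The rewritten needToLoadDataBlock looks the statistics entry up by column id, treats a missing entry or an unsupported (binary/nchar) column as "must load", skips columns that are entirely NULL, and otherwise tests each filter against the block's min/max. A reduced, single-column sketch of the min/max test; the FLOAT special case and the top/bottom check from the real function are left out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t min, max, numOfNull; } ColStatis;
typedef struct { int64_t lower, upper; } RangeFilter;   /* keep rows in [lower, upper] */

/* true  -> the block may contain qualifying rows and must be loaded
 * false -> min/max prove no row can pass, so the block can be skipped */
static bool blockMayQualify(const ColStatis *st, const RangeFilter *f, int rowsInBlock) {
  if (st == NULL) return true;                     /* no statistics: be conservative */
  if (st->numOfNull == rowsInBlock) return false;  /* all values NULL, filter cannot match */
  return !(st->max < f->lower || st->min > f->upper);
}

int main(void) {
  ColStatis st = { .min = 10, .max = 20, .numOfNull = 0 };
  RangeFilter f1 = { .lower = 30, .upper = 40 };   /* disjoint with [10,20]: skip */
  RangeFilter f2 = { .lower = 15, .upper = 40 };   /* overlaps: load */
  printf("filter1: %s\n", blockMayQualify(&st, &f1, 1000) ? "load" : "skip");
  printf("filter2: %s\n", blockMayQualify(&st, &f2, 1000) ? "load" : "skip");
  return 0;
}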
SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis) { int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
uint32_t r = 0; uint32_t status = 0;
SArray * pDataBlock = NULL;
if (pQuery->numOfFilterCols > 0) { if (pQuery->numOfFilterCols > 0) {
r = BLK_DATA_ALL_NEEDED; status = BLK_DATA_ALL_NEEDED;
} else { } else { // check if this data block is required to load
// check if this data block is required to load
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base; SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base;
int32_t functionId = pSqlFunc->functionId; int32_t functionId = pSqlFunc->functionId;
int32_t colId = pSqlFunc->colInfo.colId; int32_t colId = pSqlFunc->colInfo.colId;
r |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pQuery->window.skey, pQuery->window.ekey, colId); status |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
} }
if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) { if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) {
r |= BLK_DATA_ALL_NEEDED; status |= BLK_DATA_ALL_NEEDED;
} }
} }
if (r == BLK_DATA_NO_NEEDED) { if (status == BLK_DATA_NO_NEEDED) {
qDebug("QInfo:%p data block discard, rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->rows); qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pRuntimeEnv->summary.discardBlocks += 1; pRuntimeEnv->summary.discardBlocks += 1;
} else if (r == BLK_DATA_STATIS_NEEDED) { } else if (status == BLK_DATA_STATIS_NEEDED) {
if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
// return DISK_DATA_LOAD_FAILED; // return DISK_DATA_LOAD_FAILED;
} }
@ -2057,32 +2029,34 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
pRuntimeEnv->summary.loadBlockStatis += 1; pRuntimeEnv->summary.loadBlockStatis += 1;
if (*pStatis == NULL) { // data block statistics does not exist, load data block if (*pStatis == NULL) { // data block statistics does not exist, load data block
pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
} }
} else { } else {
assert(r == BLK_DATA_ALL_NEEDED); assert(status == BLK_DATA_ALL_NEEDED);
// load the data block statistics to perform further filter // load the data block statistics to perform further filter
pRuntimeEnv->summary.loadBlockStatis +=1; pRuntimeEnv->summary.loadBlockStatis += 1;
if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
} }
if (!needToLoadDataBlock(pQuery,*pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) { if (!needToLoadDataBlock(pRuntimeEnv, *pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) {
#if defined(_DEBUG_VIEW) #if defined(_DEBUG_VIEW)
qDebug("QInfo:%p block discarded by per-filter", GET_QINFO_ADDR(pRuntimeEnv)); qDebug("QInfo:%p block discarded by per-filter", GET_QINFO_ADDR(pRuntimeEnv));
#endif #endif
// current block has been discard due to filter applied // current block has been discard due to filter applied
pRuntimeEnv->summary.discardBlocks += 1; pRuntimeEnv->summary.discardBlocks += 1;
// return DISK_DATA_DISCARDED; qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
return BLK_DATA_DISCARD;
} }
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
pRuntimeEnv->summary.loadBlocks += 1; pRuntimeEnv->summary.loadBlocks += 1;
pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
} }
return pDataBlock; return TSDB_CODE_SUCCESS;
} }
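With the new signature the data block travels through an out parameter and the return value tells the scan loop what happened; on BLK_DATA_DISCARD the caller only advances lastKey past the block and continues (see the doScanAllDataBlocks hunk below). A minimal sketch of that status-plus-out-parameter convention, with simplified types:

#include <stdio.h>

enum { LOAD_OK = 0, LOAD_DISCARD = 1 };

typedef struct { int firstKey, lastKey, rows; } BlockInfo;

/* Returns a status; the rows pointer is only filled in when the block is kept. */
static int loadBlockOnDemand(const BlockInfo *blk, const int **rows) {
  static const int data[] = { 1, 2, 3 };
  *rows = NULL;
  if (blk->rows == 0) return LOAD_DISCARD;   /* stand-in for "pre-filter rejected it" */
  *rows = data;
  return LOAD_OK;
}

int main(void) {
  BlockInfo blocks[] = { { 0, 9, 0 }, { 10, 19, 3 } };
  int lastKey = -1;
  for (int i = 0; i < 2; ++i) {
    const int *rows = NULL;
    if (loadBlockOnDemand(&blocks[i], &rows) == LOAD_DISCARD) {
      lastKey = blocks[i].lastKey + 1;       /* step over the block (ascending scan) */
      continue;
    }
    lastKey = blocks[i].lastKey + 1;
    printf("processed block %d, lastKey now %d, first row %d\n", i, lastKey, rows[0]);
  }
  printf("final lastKey: %d\n", lastKey);
  return 0;
}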
int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) { int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
@ -2225,13 +2199,13 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
pQuery->order.order); pQuery->order.order);
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) { while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1; summary->totalBlocks += 1;
if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
@ -2259,7 +2233,11 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
ensureOutputBuffer(pRuntimeEnv, &blockInfo); ensureOutputBuffer(pRuntimeEnv, &blockInfo);
SDataStatis *pStatis = NULL; SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); SArray *pDataBlock = NULL;
if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
continue;
}
// query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition // query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1; pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1;
@ -2282,8 +2260,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) {
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
// int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP;
closeAllTimeWindow(&pRuntimeEnv->windowResInfo); closeAllTimeWindow(&pRuntimeEnv->windowResInfo);
// removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); // removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step);
pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window
@ -2978,16 +2954,23 @@ void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) {
} }
} }
void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize) { int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize) {
int32_t numOfCols = pQuery->numOfOutput; int32_t numOfCols = pQuery->numOfOutput;
pResultRow->resultInfo = calloc((size_t)numOfCols, sizeof(SResultInfo)); pResultRow->resultInfo = calloc((size_t)numOfCols, sizeof(SResultInfo));
if (pResultRow->resultInfo == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pResultRow->pos = *posInfo; pResultRow->pos = *posInfo;
char* buf = calloc(1, interBufSize); char* buf = calloc(1, interBufSize);
if (buf == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
// set the intermediate result output buffer // set the intermediate result output buffer
setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery, buf); setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery, buf);
return TSDB_CODE_SUCCESS;
} }
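Both allocations in createQueryResultInfo can now fail independently and the failure is reported as an error code rather than ignored, which is what lets initWindowResInfo (in the hunk further down) stop at the first bad slot. A small sketch of the same propagation pattern; the sketch additionally frees the first allocation when the second one fails, and the error constant is only a stand-in for TSDB_CODE_QRY_OUT_OF_MEMORY:

#include <stdio.h>
#include <stdlib.h>

enum { CODE_SUCCESS = 0, CODE_OUT_OF_MEMORY = 1 };

typedef struct { void *info; void *buf; } ResultRow;

/* Each allocation is checked and the first failure becomes an error code that
 * the caller can pass straight up the call chain. */
static int createResultRow(ResultRow *row, size_t numOfCols, size_t interBufSize) {
  row->info = calloc(numOfCols, sizeof(double));
  if (row->info == NULL) return CODE_OUT_OF_MEMORY;

  row->buf = calloc(1, interBufSize);
  if (row->buf == NULL) {
    free(row->info);            /* release what was already acquired */
    row->info = NULL;
    return CODE_OUT_OF_MEMORY;
  }
  return CODE_SUCCESS;
}

static int initRows(ResultRow *rows, int n, size_t numOfCols, size_t interBufSize) {
  for (int i = 0; i < n; ++i) {
    int code = createResultRow(&rows[i], numOfCols, interBufSize);
    if (code != CODE_SUCCESS) return code;   /* stop at the first failing slot */
  }
  return CODE_SUCCESS;
}

int main(void) {
  ResultRow rows[4] = {{ NULL, NULL }};
  int code = initRows(rows, 4, 8, 256);
  printf("init result: %d\n", code);
  for (int i = 0; i < 4; ++i) { free(rows[i].info); free(rows[i].buf); }
  return 0;
}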
void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
@ -3368,7 +3351,10 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void
if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) {
int32_t initialSize = 16; int32_t initialSize = 16;
int32_t initialThreshold = 100; int32_t initialThreshold = 100;
initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); int32_t code = initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
} else { // in other aggregate query, do not initialize the windowResInfo } else { // in other aggregate query, do not initialize the windowResInfo
} }
@ -3383,12 +3369,10 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols); cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols);
} }
#define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \ #define CHECK_QUERY_TIME_RANGE(_q, _tableInfo) \
do { \ do { \
SQuery *_query = (_runtime)->pQuery; \ assert((((_tableInfo)->lastKey >= (_tableInfo)->win.skey) && QUERY_IS_ASC_QUERY(_q)) || \
_query->current = _tableInfo; \ (((_tableInfo)->lastKey <= (_tableInfo)->win.skey) && !QUERY_IS_ASC_QUERY(_q))); \
assert((((_tableInfo)->lastKey >= (_tableInfo)->win.skey) && QUERY_IS_ASC_QUERY(_query)) || \
(((_tableInfo)->lastKey <= (_tableInfo)->win.skey) && !QUERY_IS_ASC_QUERY(_query))); \
} while (0) } while (0)
/** /**
@ -3700,7 +3684,7 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) {
} }
} }
void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis, static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis,
SArray *pDataBlock, __block_search_fn_t searchFn) { SArray *pDataBlock, __block_search_fn_t searchFn) {
SQuery * pQuery = pRuntimeEnv->pQuery; SQuery * pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* pTableQueryInfo = pQuery->current; STableQueryInfo* pTableQueryInfo = pQuery->current;
@ -3859,9 +3843,10 @@ static void queryCostStatis(SQInfo *pQInfo) {
// pQInfo, pSummary->readDiskBlocks, pSummary->totalBlockSize, pSummary->loadBlocksUs / 1000.0, // pQInfo, pSummary->readDiskBlocks, pSummary->totalBlockSize, pSummary->loadBlocksUs / 1000.0,
// pSummary->skippedFileBlocks, pSummary->totalGenData); // pSummary->skippedFileBlocks, pSummary->totalGenData);
qDebug("QInfo:%p :cost summary: elpased time:%"PRId64" us, total blocks:%d, use block statis:%d, use block data:%d, " qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, io time:%"PRId64" us, total blocks:%d, load block statis:%d,"
"total rows:%"PRId64 ", check rows:%"PRId64, pQInfo, pSummary->elapsedTime, pSummary->totalBlocks, " load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64,
pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); pQInfo, pSummary->elapsedTime, pSummary->ioTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows);
// qDebug("QInfo:%p cost: temp file:%d Bytes", pQInfo, pSummary->tmpBufferInDisk); // qDebug("QInfo:%p cost: temp file:%d Bytes", pQInfo, pSummary->tmpBufferInDisk);
// //
@ -4189,7 +4174,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
type = TSDB_DATA_TYPE_INT; // group id type = TSDB_DATA_TYPE_INT; // group id
} }
initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 32, 4096, type); code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 32, 4096, type);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} }
} else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { } else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) {
@ -4206,7 +4194,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
type = TSDB_DATA_TYPE_TIMESTAMP; type = TSDB_DATA_TYPE_TIMESTAMP;
} }
initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, rows, 4096, type); code = initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, rows, 4096, type);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} }
if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) {
@ -4237,6 +4228,23 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
} }
} }
static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTableQueryInfo, SDataBlockInfo* pBlockInfo) {
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
setExecutionContext(pQInfo, pTableQueryInfo->groupIndex, pBlockInfo->window.ekey + step);
} else { // interval query
TSKEY nextKey = pBlockInfo->window.skey;
setIntervalQueryRange(pQInfo, nextKey);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) {
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
}
}
static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery* pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
@ -4247,10 +4255,12 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
while (tsdbNextDataBlock(pQueryHandle)) { while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1; summary->totalBlocks += 1;
if (IS_QUERY_KILLED(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
@ -4260,24 +4270,19 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
break; break;
} }
assert(*pTableQueryInfo != NULL); pQuery->current = *pTableQueryInfo;
SET_CURRENT_QUERY_TABLE_INFO(pRuntimeEnv, *pTableQueryInfo); CHECK_QUERY_TIME_RANGE(pQuery, *pTableQueryInfo);
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
if (!pRuntimeEnv->groupbyNormalCol) { if (!pRuntimeEnv->groupbyNormalCol) {
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { setEnvForEachBlock(pQInfo, *pTableQueryInfo, &blockInfo);
int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1; }
setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step);
} else { // interval query
TSKEY nextKey = blockInfo.window.skey;
setIntervalQueryRange(pQInfo, nextKey);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { SDataStatis *pStatis = NULL;
setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo); SArray *pDataBlock = NULL;
}
} if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
continue;
} }
summary->totalRows += blockInfo.rows; summary->totalRows += blockInfo.rows;
@ -4537,7 +4542,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) { while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) {
if (IS_QUERY_KILLED(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
@ -5035,6 +5039,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol); isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol);
sequentialTableProcess(pQInfo); sequentialTableProcess(pQInfo);
} }
// record the total elapsed time // record the total elapsed time
@ -5366,10 +5371,11 @@ static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable
qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz);
tExprNode* pExprNode = NULL; tExprNode* pExprNode = NULL;
TRY(32) { TRY(TSDB_MAX_TAGS) {
pExprNode = exprTreeFromBinary(pArithExprInfo->base.arg[0].argValue.pz, pArithExprInfo->base.arg[0].argBytes); pExprNode = exprTreeFromBinary(pArithExprInfo->base.arg[0].argValue.pz, pArithExprInfo->base.arg[0].argBytes);
} CATCH( code ) { } CATCH( code ) {
CLEANUP_EXECUTE(); CLEANUP_EXECUTE();
qError("qmsg:%p failed to create arithmetic expression string from:%s, reason: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz, tstrerror(code));
return code; return code;
} END_TRY } END_TRY
@ -5627,17 +5633,23 @@ static void freeQInfo(SQInfo *pQInfo);
static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols) { STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols) {
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) {
return NULL;
}
SQuery *pQuery = calloc(1, sizeof(SQuery));
pQInfo->runtimeEnv.pQuery = pQuery;
int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfCols = pQueryMsg->numOfCols;
int16_t numOfOutput = pQueryMsg->numOfOutput; int16_t numOfOutput = pQueryMsg->numOfOutput;
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) {
goto _cleanup_qinfo;
}
// to make sure third party won't overwrite this structure
pQInfo->signature = pQInfo;
pQInfo->tableGroupInfo = *pTableGroupInfo;
SQuery *pQuery = calloc(1, sizeof(SQuery));
if (pQuery == NULL) {
goto _cleanup_query;
}
pQInfo->runtimeEnv.pQuery = pQuery;
pQuery->numOfCols = numOfCols; pQuery->numOfCols = numOfCols;
pQuery->numOfOutput = numOfOutput; pQuery->numOfOutput = numOfOutput;
pQuery->limit.limit = pQueryMsg->limit; pQuery->limit.limit = pQueryMsg->limit;
@ -5651,6 +5663,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit; pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
pQuery->fillType = pQueryMsg->fillType; pQuery->fillType = pQueryMsg->fillType;
pQuery->numOfTags = pQueryMsg->numOfTags; pQuery->numOfTags = pQueryMsg->numOfTags;
pQuery->tagColList = pTagCols;
// todo do not allocate ?? // todo do not allocate ??
pQuery->colList = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); pQuery->colList = calloc(numOfCols, sizeof(SSingleColumnFilterInfo));
@ -5663,8 +5676,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->colList[i].filters = tscFilterInfoClone(pQueryMsg->colList[i].filters, pQuery->colList[i].numOfFilters); pQuery->colList[i].filters = tscFilterInfoClone(pQueryMsg->colList[i].filters, pQuery->colList[i].numOfFilters);
} }
pQuery->tagColList = pTagCols;
// calculate the result row size // calculate the result row size
for (int16_t col = 0; col < numOfOutput; ++col) { for (int16_t col = 0; col < numOfOutput; ++col) {
assert(pExprs[col].bytes > 0); assert(pExprs[col].bytes > 0);
@ -5709,10 +5720,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
memcpy(pQuery->fillVal, (char *)pQueryMsg->fillVal, pQuery->numOfOutput * sizeof(int64_t)); memcpy(pQuery->fillVal, (char *)pQueryMsg->fillVal, pQuery->numOfOutput * sizeof(int64_t));
} }
// to make sure third party won't overwrite this structure
pQInfo->signature = pQInfo;
pQInfo->tableGroupInfo = *pTableGroupInfo;
size_t numOfGroups = 0; size_t numOfGroups = 0;
if (pTableGroupInfo->pGroupList != NULL) { if (pTableGroupInfo->pGroupList != NULL) {
numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList); numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList);
@ -5736,6 +5743,10 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
size_t s = taosArrayGetSize(pa); size_t s = taosArrayGetSize(pa);
SArray* p1 = taosArrayInit(s, POINTER_BYTES); SArray* p1 = taosArrayInit(s, POINTER_BYTES);
if (p1 == NULL) {
goto _cleanup;
}
taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);
for(int32_t j = 0; j < s; ++j) { for(int32_t j = 0; j < s; ++j) {
void* pTable = taosArrayGetP(pa, j); void* pTable = taosArrayGetP(pa, j);
@ -5750,13 +5761,14 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo); void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo);
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf); STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf);
if (item == NULL) {
goto _cleanup;
}
item->groupIndex = i; item->groupIndex = i;
taosArrayPush(p1, &item); taosArrayPush(p1, &item);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES); taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
index += 1; index += 1;
} }
taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);
} }
pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo)); pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));
@ -5775,6 +5787,21 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
qDebug("qmsg:%p QInfo:%p created", pQueryMsg, pQInfo); qDebug("qmsg:%p QInfo:%p created", pQueryMsg, pQInfo);
return pQInfo; return pQInfo;
_cleanup_qinfo:
tsdbDestoryTableGroup(pTableGroupInfo);
_cleanup_query:
taosArrayDestroy(pGroupbyExpr->columnInfo);
tfree(pGroupbyExpr);
tfree(pTagCols);
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = &pExprs[i];
if (pExprInfo->pExpr != NULL) {
tExprTreeDestroy(&pExprInfo->pExpr, NULL);
}
}
tfree(pExprs);
_cleanup: _cleanup:
freeQInfo(pQInfo); freeQInfo(pQInfo);
return NULL; return NULL;
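createQInfoImpl now carries three cleanup labels because the function takes ownership of caller-supplied structures (table group info, group-by expression, tag columns, expression list) before pQInfo can hold them; a failure at an early stage must free those inputs explicitly, while later failures can rely on freeQInfo. A compressed sketch of that staged-goto ownership hand-over, with placeholder resources:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char *name; char *buffer; } Thing;

static void destroyThing(Thing *t) {   /* safe on NULL and on partially built objects */
  if (t == NULL) return;
  free(t->name);
  free(t->buffer);
  free(t);
}

/* The function takes ownership of `name`. Failures before the hand-over free it
 * explicitly; once it is stored inside the object, destroyThing() covers it,
 * which is why the later goto target skips the earlier free. */
static Thing *createThing(char *name, int failAt) {
  Thing *t = calloc(1, sizeof(Thing));
  if (t == NULL || failAt == 1) goto _cleanup_input;

  t->name = name;                       /* ownership transferred */

  t->buffer = malloc(256);
  if (t->buffer == NULL || failAt == 2) goto _cleanup;

  return t;

_cleanup_input:
  free(name);                           /* not yet owned by the object */
_cleanup:
  destroyThing(t);
  return NULL;
}

int main(void) {
  Thing *ok = createThing(strdup("fine"), 0);
  printf("create: %s\n", ok != NULL ? "ok" : "failed");
  destroyThing(ok);

  if (createThing(strdup("fails early"), 1) == NULL) {
    printf("early failure released the input as well\n");
  }
  return 0;
}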
@ -5893,6 +5920,7 @@ static void freeQInfo(SQInfo *pQInfo) {
} }
// todo refactor, extract method to destroytableDataInfo // todo refactor, extract method to destroytableDataInfo
if (pQInfo->tableqinfoGroupInfo.pGroupList != NULL) {
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo); int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroups; ++i) { for (int32_t i = 0; i < numOfGroups; ++i) {
SArray *p = GET_TABLEGROUP(pQInfo, i); SArray *p = GET_TABLEGROUP(pQInfo, i);
@ -5907,6 +5935,7 @@ static void freeQInfo(SQInfo *pQInfo) {
taosArrayDestroy(p); taosArrayDestroy(p);
} }
}
tfree(pQInfo->pBuf); tfree(pQInfo->pBuf);
taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList); taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList);
@ -6086,6 +6115,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex, code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex,
numOfGroupByCols); numOfGroupByCols);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
if (code == TSDB_CODE_QRY_EXCEED_TAGS_LIMIT) {
qError("qmsg:%p failed to QueryStable, reason: %s", pQueryMsg, tstrerror(code));
}
goto _over; goto _over;
} }
} else { } else {
@ -6133,11 +6165,6 @@ _over:
//pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null;
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
*pQInfo = NULL; *pQInfo = NULL;
} else {
// SQInfo* pq = (SQInfo*) (*pQInfo);
// T_REF_INC(pq);
// T_REF_INC(pq);
} }
// if failed to add ref for all meters in this query, abort current query // if failed to add ref for all meters in this query, abort current query
@ -6306,6 +6333,7 @@ int32_t qKillQuery(qinfo_t qinfo) {
return TSDB_CODE_QRY_INVALID_QHANDLE; return TSDB_CODE_QRY_INVALID_QHANDLE;
} }
sem_post(&pQInfo->dataReady);
setQueryKilled(pQInfo); setQueryKilled(pQInfo);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -6518,13 +6546,14 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) {
SQueryMgmt *pQueryMgmt = pMgmt; SQueryMgmt *pQueryMgmt = pMgmt;
if (pQueryMgmt->qinfoPool == NULL) { if (pQueryMgmt->qinfoPool == NULL) {
qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo);
return NULL; return NULL;
} }
pthread_mutex_lock(&pQueryMgmt->lock); pthread_mutex_lock(&pQueryMgmt->lock);
if (pQueryMgmt->closed) { if (pQueryMgmt->closed) {
pthread_mutex_unlock(&pQueryMgmt->lock); pthread_mutex_unlock(&pQueryMgmt->lock);
qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo);
return NULL; return NULL;
} else { } else {
uint64_t handleVal = (uint64_t) qInfo; uint64_t handleVal = (uint64_t) qInfo;

View File

@ -50,9 +50,15 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
// use the pointer arraylist // use the pointer arraylist
pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult)); pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult));
if (pWindowResInfo->pResult == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) {
SPosInfo posInfo = {-1, -1}; SPosInfo posInfo = {-1, -1};
createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize); int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -76,10 +82,12 @@ void cleanupTimeWindowInfo(SWindowResInfo *pWindowResInfo, int32_t numOfCols) {
return; return;
} }
if (pWindowResInfo->pResult != NULL) {
for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) {
SWindowResult *pResult = &pWindowResInfo->pResult[i]; SWindowResult *pResult = &pWindowResInfo->pResult[i];
destroyTimeWindowRes(pResult, numOfCols); destroyTimeWindowRes(pResult, numOfCols);
} }
}
taosHashCleanup(pWindowResInfo->hashList); taosHashCleanup(pWindowResInfo->hashList);
tfree(pWindowResInfo->pResult); tfree(pWindowResInfo->pResult);

View File

@ -962,10 +962,13 @@ static UNUSED_FUNC char* exception_strdup(const char* str) {
static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) { static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
int32_t anchor = CLEANUP_GET_ANCHOR(); int32_t anchor = CLEANUP_GET_ANCHOR();
if (CLEANUP_EXCEED_LIMIT()) {
THROW(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT);
return NULL;
}
tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode)); tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode));
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, pExpr, NULL); CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, pExpr, NULL);
pExpr->nodeType = tbufReadUint8(br); pExpr->nodeType = tbufReadUint8(br);
if (pExpr->nodeType == TSQL_NODE_VALUE) { if (pExpr->nodeType == TSQL_NODE_VALUE) {
@ -995,7 +998,6 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
pExpr->_node.hasPK = tbufReadUint8(br); pExpr->_node.hasPK = tbufReadUint8(br);
pExpr->_node.pLeft = exprTreeFromBinaryImpl(br); pExpr->_node.pLeft = exprTreeFromBinaryImpl(br);
pExpr->_node.pRight = exprTreeFromBinaryImpl(br); pExpr->_node.pRight = exprTreeFromBinaryImpl(br);
assert(pExpr->_node.pLeft != NULL && pExpr->_node.pRight != NULL); assert(pExpr->_node.pLeft != NULL && pExpr->_node.pRight != NULL);
} }
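exprTreeFromBinaryImpl is recursive and pushes one cleanup action per node, so the pending cleanup count doubles as a size guard; once CLEANUP_EXCEED_LIMIT() trips (the budget set by TRY(TSDB_MAX_TAGS)) the parse aborts with TSDB_CODE_QRY_EXCEED_TAGS_LIMIT instead of overrunning the cleanup stack. A self-contained sketch of the same guard, using a plain node counter in place of the exception framework:

#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 32    /* stand-in for the TRY(TSDB_MAX_TAGS) budget */

typedef struct Node { struct Node *left, *right; } Node;

/* Builds a binary tree of the requested depth, refusing to allocate more nodes
 * than the budget allows; *nodesUsed == -1 signals "limit exceeded". */
static Node *buildTree(int depth, int *nodesUsed) {
  if (*nodesUsed < 0) return NULL;       /* a deeper call already failed */
  if (depth == 0) return NULL;

  if (*nodesUsed >= MAX_NODES) {         /* the guard the hunk above adds */
    *nodesUsed = -1;
    return NULL;
  }
  (*nodesUsed)++;

  Node *n = calloc(1, sizeof(Node));
  n->left  = buildTree(depth - 1, nodesUsed);
  n->right = buildTree(depth - 1, nodesUsed);
  return n;
}

static void freeTree(Node *n) {
  if (n == NULL) return;
  freeTree(n->left);
  freeTree(n->right);
  free(n);
}

int main(void) {
  int used = 0;
  Node *ok = buildTree(4, &used);        /* 15 nodes, within budget */
  printf("depth 4: used=%d\n", used);
  freeTree(ok);

  used = 0;
  Node *tooBig = buildTree(7, &used);    /* would need 127 nodes */
  printf("depth 7: %s\n", used < 0 ? "aborted, limit exceeded" : "ok");
  freeTree(tooBig);
  return 0;
}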

View File

@ -57,8 +57,30 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
STable * super = NULL; STable * super = NULL;
STable * table = NULL; STable * table = NULL;
int newSuper = 0; int newSuper = 0;
int tid = pCfg->tableId.tid;
STable * pTable = NULL;
STable *pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid); if (tid < 0 || tid >= pRepo->config.maxTables) {
tsdbError("vgId:%d failed to create table since invalid tid %d", REPO_ID(pRepo), tid);
terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
goto _err;
}
if (pMeta->tables[tid] != NULL) {
if (TABLE_UID(pMeta->tables[tid]) == pCfg->tableId.uid) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
return TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
} else {
tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
" exists, replace it with new table, this can be not reasonable",
REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
TABLE_UID(pMeta->tables[tid]));
tsdbDropTable(pRepo, pMeta->tables[tid]->tableId);
}
}
pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid);
if (pTable != NULL) { if (pTable != NULL) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable)); TABLE_TID(pTable), TABLE_UID(pTable));
@ -72,10 +94,10 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
super = tsdbNewTable(pCfg, true); super = tsdbNewTable(pCfg, true);
if (super == NULL) goto _err; if (super == NULL) goto _err;
} else { } else {
// TODO if (TABLE_TYPE(super) != TSDB_SUPER_TABLE || TABLE_UID(super) != pCfg->superUid) {
if (super->type != TSDB_SUPER_TABLE) return -1; terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
if (super->tableId.uid != pCfg->superUid) return -1; goto _err;
// tsdbUpdateTable(pRepo, super, pCfg); }
} }
} }
@ -705,6 +727,9 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
T_REF_INC(pTable); T_REF_INC(pTable);
tsdbDebug("table %s tid %d uid %" PRIu64 " is created", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable),
TABLE_UID(pTable));
return pTable; return pTable;
_err: _err:
@ -714,7 +739,9 @@ _err:
static void tsdbFreeTable(STable *pTable) { static void tsdbFreeTable(STable *pTable) {
if (pTable) { if (pTable) {
if (pTable->name != NULL) tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable)); if (pTable->name != NULL)
tsdbDebug("table %s tid %d uid %" PRIu64 " is destroyed", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable),
TABLE_UID(pTable));
tfree(TABLE_NAME(pTable)); tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
@ -782,7 +809,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
tsdbGetTableSchemaImpl(pTable, false, false, -1)); tsdbGetTableSchemaImpl(pTable, false, false, -1));
} }
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable)); TABLE_TID(pTable), TABLE_UID(pTable));
return 0; return 0;

View File

@ -739,7 +739,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
pCompBlock->keyFirst = dataColsKeyFirst(pDataCols); pCompBlock->keyFirst = dataColsKeyFirst(pDataCols);
pCompBlock->keyLast = dataColsKeyAt(pDataCols, rowsToWrite - 1); pCompBlock->keyLast = dataColsKeyAt(pDataCols, rowsToWrite - 1);
tsdbDebug("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64 tsdbTrace("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64
" numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64, " numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64,
REPO_ID(helperRepo(pHelper)), pHelper->tableInfo.tid, pFile->fname, (int64_t)(pCompBlock->offset), REPO_ID(helperRepo(pHelper)), pHelper->tableInfo.tid, pFile->fname, (int64_t)(pCompBlock->offset),
(int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst, (int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst,
@ -940,7 +940,7 @@ static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int
ASSERT(pHelper->pCompInfo->blocks[0].keyLast < pHelper->pCompInfo->blocks[1].keyFirst); ASSERT(pHelper->pCompInfo->blocks[0].keyLast < pHelper->pCompInfo->blocks[1].keyFirst);
} }
tsdbDebug("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid, tsdbTrace("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid,
blkIdx); blkIdx);
return 0; return 0;

View File

@ -1802,6 +1802,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL); tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL);
// todo opt perf
size_t numOfCols = QH_GET_NUM_OF_COLS(pHandle); size_t numOfCols = QH_GET_NUM_OF_COLS(pHandle);
for(int32_t i = 0; i < numOfCols; ++i) { for(int32_t i = 0; i < numOfCols; ++i) {
SDataStatis* st = &pHandle->statis[i]; SDataStatis* st = &pHandle->statis[i];
@ -1820,6 +1821,13 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows; pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
} }
// todo opt perf
SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i);
if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) {
pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst;
pHandle->statis[i].max = pBlockInfo->compBlock->keyLast;
}
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -2193,7 +2201,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
tExprNode* expr = NULL; tExprNode* expr = NULL;
TRY(32) { TRY(TSDB_MAX_TAGS) {
expr = exprTreeFromTableName(tbnameCond); expr = exprTreeFromTableName(tbnameCond);
if (expr == NULL) { if (expr == NULL) {
expr = exprTreeFromBinary(pTagCond, len); expr = exprTreeFromBinary(pTagCond, len);
@ -2217,7 +2225,8 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT
} CATCH( code ) { } CATCH( code ) {
CLEANUP_EXECUTE(); CLEANUP_EXECUTE();
ret = code; terrno = code;
goto _error;
// TODO: more error handling // TODO: more error handling
} END_TRY } END_TRY

View File

@ -78,6 +78,7 @@ void cleanupPush_int_ptr ( bool failOnly, void* func, void* arg );
int32_t cleanupGetActionCount(); int32_t cleanupGetActionCount();
void cleanupExecuteTo( int32_t anchor, bool failed ); void cleanupExecuteTo( int32_t anchor, bool failed );
void cleanupExecute( SExceptionNode* node, bool failed ); void cleanupExecute( SExceptionNode* node, bool failed );
bool cleanupExceedLimit();
#define CLEANUP_PUSH_VOID_PTR_PTR( failOnly, func, arg1, arg2 ) cleanupPush_void_ptr_ptr( (failOnly), (void*)(func), (void*)(arg1), (void*)(arg2) ) #define CLEANUP_PUSH_VOID_PTR_PTR( failOnly, func, arg1, arg2 ) cleanupPush_void_ptr_ptr( (failOnly), (void*)(func), (void*)(arg1), (void*)(arg2) )
#define CLEANUP_PUSH_VOID_PTR_BOOL( failOnly, func, arg1, arg2 ) cleanupPush_void_ptr_bool( (failOnly), (void*)(func), (void*)(arg1), (bool)(arg2) ) #define CLEANUP_PUSH_VOID_PTR_BOOL( failOnly, func, arg1, arg2 ) cleanupPush_void_ptr_bool( (failOnly), (void*)(func), (void*)(arg1), (bool)(arg2) )
@ -91,7 +92,7 @@ void cleanupExecute( SExceptionNode* node, bool failed );
#define CLEANUP_GET_ANCHOR() cleanupGetActionCount() #define CLEANUP_GET_ANCHOR() cleanupGetActionCount()
#define CLEANUP_EXECUTE_TO( anchor, failed ) cleanupExecuteTo( (anchor), (failed) ) #define CLEANUP_EXECUTE_TO( anchor, failed ) cleanupExecuteTo( (anchor), (failed) )
#define CLEANUP_EXCEED_LIMIT() cleanupExceedLimit()
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// functions & macros for exception handling // functions & macros for exception handling

View File

@ -23,13 +23,12 @@ extern "C" {
#include "os.h" #include "os.h"
#define TARRAY_MIN_SIZE 8 #define TARRAY_MIN_SIZE 8
#define TARRAY_GET_ELEM(array, index) ((void*)((array)->pData + (index) * (array)->elemSize)) #define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
typedef struct SArray { typedef struct SArray {
size_t size; size_t size;
size_t capacity; size_t capacity;
size_t elemSize; size_t elemSize;
void* pData; void* pData;
} SArray; } SArray;
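The TARRAY_GET_ELEM change matters because pData is a void*, and pointer arithmetic on void* is a compiler extension rather than standard C; casting to char* first makes the byte-offset computation portable. A tiny sketch of the same idea with an illustrative macro name:

#include <stdio.h>

/* Portable element access: cast to char* before applying the byte offset.
 * ((array)->pData + (index) * elemSize) on a void* only compiles where void*
 * arithmetic is accepted as an extension. */
#define ARRAY_GET_ELEM(pData, elemSize, index) \
  ((void *)((char *)(pData) + (index) * (elemSize)))

int main(void) {
  double values[4] = { 1.5, 2.5, 3.5, 4.5 };
  void  *pData    = values;                 /* generic container storage */

  double *third = (double *)ARRAY_GET_ELEM(pData, sizeof(double), 2);
  printf("element 2 = %.1f\n", *third);     /* prints 3.5 */
  return 0;
}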

View File

@ -19,13 +19,14 @@
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
void taosSetRandomFileFailFactor(int factor); void taosSetRandomFileFailFactor(int factor);
ssize_t taos_tread(int fd, void *buf, size_t count); void taosSetRandomFileFailOutput(const char *path);
ssize_t taos_twrite(int fd, void *buf, size_t count); ssize_t taos_tread(int fd, void *buf, size_t count, const char *file, uint32_t line);
off_t taos_lseek(int fd, off_t offset, int whence); ssize_t taos_twrite(int fd, void *buf, size_t count, const char *file, uint32_t line);
off_t taos_lseek(int fd, off_t offset, int whence, const char *file, uint32_t line);
#define tread(fd, buf, count) taos_tread(fd, buf, count) #define tread(fd, buf, count) taos_tread(fd, buf, count, __FILE__, __LINE__)
#define twrite(fd, buf, count) taos_twrite(fd, buf, count) #define twrite(fd, buf, count) taos_twrite(fd, buf, count, __FILE__, __LINE__)
#define lseek(fd, offset, whence) taos_lseek(fd, offset, whence) #define lseek(fd, offset, whence) taos_lseek(fd, offset, whence, __FILE__, __LINE__)
#endif // TAOS_RANDOM_FILE_FAIL #endif // TAOS_RANDOM_FILE_FAIL
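Passing __FILE__ and __LINE__ through the taos_tread/taos_twrite/taos_lseek wrappers means a randomly injected I/O failure can be traced to the exact call site being exercised. A sketch of the wrapping technique for a simplified write path; the wrapper name, the probability handling and the output format are illustrative, not the real implementation:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int failFactor = 0;   /* 0 disables injection; N fails roughly 1 in N calls */

/* Wrapper that may return a fake EIO, and always knows who called it. */
static ssize_t traced_write(int fd, const void *buf, size_t count,
                            const char *file, unsigned line) {
  if (failFactor > 0 && rand() % failFactor == 0) {
    fprintf(stderr, "injected write failure at %s:%u\n", file, line);
    errno = EIO;
    return -1;
  }
  return write(fd, buf, count);
}

/* Call sites keep using the short name; the macro captures the location for free. */
#define twrite(fd, buf, count) traced_write((fd), (buf), (count), __FILE__, __LINE__)

int main(void) {
  failFactor = 1;                         /* force every call to fail for the demo */
  if (twrite(STDOUT_FILENO, "hello\n", 6) < 0) {
    perror("twrite");
  }
  return 0;
}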

View File

@ -147,3 +147,6 @@ void cleanupExecuteTo( int32_t anchor, bool failed ) {
void cleanupExecute( SExceptionNode* node, bool failed ) { void cleanupExecute( SExceptionNode* node, bool failed ) {
doExecuteCleanup( node, 0, failed ); doExecuteCleanup( node, 0, failed );
} }
bool cleanupExceedLimit() {
return expList->numCleanupAction >= expList->maxCleanupAction;
}

View File

@ -294,7 +294,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
} }
} else { // old data exists, update the node } else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L); pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
uDebug("cache:%s, key:%p, %p exist in cache, updated", pCacheObj->name, key, pNode->data); uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode->data, pOld);
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
@ -307,6 +307,8 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
return NULL; return NULL;
} }
void *pData = NULL;
__cache_rd_lock(pCacheObj); __cache_rd_lock(pCacheObj);
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
@ -314,19 +316,21 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
int32_t ref = 0; int32_t ref = 0;
if (ptNode != NULL) { if (ptNode != NULL) {
ref = T_REF_INC(*ptNode); ref = T_REF_INC(*ptNode);
pData = (*ptNode)->data;
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
if (ptNode != NULL) { if (pData != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, (*ptNode)->data, ref); uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, ref);
} else { } else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
} }
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
return (ptNode != NULL) ? (*ptNode)->data : NULL; return pData;
} }
void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) { void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) {
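The reworked taosCacheAcquireByKey copies the data pointer and takes the reference while the read lock is still held, so nothing dereferences (*ptNode) after the unlock, where another thread could already have freed the node. A small pthread sketch of the copy-then-unlock pattern with a single, simplified cache slot (the real code bumps the reference with an atomic T_REF_INC):

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_rwlock_t lock;
  int   refCount;
  void *data;            /* NULL means "not cached" */
} CacheSlot;

/* Everything that needs the node (ref bump, data pointer) happens under the
 * read lock; after unlock only the local copies are used. */
static void *acquire(CacheSlot *slot, int *refOut) {
  void *data = NULL;
  int   ref  = 0;

  pthread_rwlock_rdlock(&slot->lock);
  if (slot->data != NULL) {
    ref  = ++slot->refCount;   /* a real cache would use an atomic increment here */
    data = slot->data;
  }
  pthread_rwlock_unlock(&slot->lock);

  if (data != NULL) printf("hit, refcnt now %d\n", ref);
  else              printf("miss\n");

  if (refOut) *refOut = ref;
  return data;
}

int main(void) {
  static int payload = 42;
  CacheSlot slot = { PTHREAD_RWLOCK_INITIALIZER, 0, &payload };

  int ref = 0;
  int *p = acquire(&slot, &ref);
  if (p) printf("value = %d\n", *p);

  slot.data = NULL;              /* simulate eviction */
  acquire(&slot, &ref);
  return 0;
}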
@ -413,57 +417,89 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
*data = NULL; *data = NULL;
// note: extend lifespan before dec ref count // note: extend lifespan before dec ref count
if (pCacheObj->extendLifespan) { bool inTrashCan = pNode->inTrashCan;
if (pCacheObj->extendLifespan && (!inTrashCan)) {
atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs());
uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime); uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime);
} }
bool inTrashCan = pNode->inTrashCan; if (_remove) {
uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1); __cache_wr_lock(pCacheObj);
// NOTE: once refcount is decrease, pNode may be free by other thread immediately. // NOTE: once the refcount is decreased, pNode may be freed by another thread immediately.
int32_t ref = T_REF_DEC(pNode); int32_t ref = T_REF_DEC(pNode);
uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
if (inTrashCan) {
// Remove it if the ref count is 0.
// The ref count does not need to load and check again after lock acquired, since ref count can not be increased when
// the node is in trashcan.
if (ref == 0) {
__cache_wr_lock(pCacheObj);
assert(pNode->pTNodeHeader->pData == pNode);
taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader);
__cache_unlock(pCacheObj);
}
} else {
assert(pNode->pTNodeHeader == NULL);
if (_remove) { // not in trash can, but need to remove it
__cache_wr_lock(pCacheObj);
/* /*
* If not referenced by other users. Otherwise move this node to trashcan wait for all users * If it is not referenced by other users, remove it immediately. Otherwise move this node to the trashcan and wait for all users
* releasing this resources. * to release this resource.
* *
* NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread
* that tries to do the same thing. * that tries to do the same thing.
*/ */
if (pNode->inTrashCan) {
if (ref == 0) { if (ref == 0) {
if (T_REF_VAL_GET(pNode) == 0) { assert(pNode->pTNodeHeader->pData == pNode);
taosCacheReleaseNode(pCacheObj, pNode); taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader);
}
} else { } else {
if (ref > 0) {
assert(pNode->pTNodeHeader == NULL);
taosCacheMoveToTrash(pCacheObj, pNode); taosCacheMoveToTrash(pCacheObj, pNode);
} else {
taosCacheReleaseNode(pCacheObj, pNode);
} }
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
// } else { // extend its life time
// if (pCacheObj->extendLifespan) { } else {
// atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1);
// uDebug("cache:%s data:%p extend life time to %"PRId64 " after release", pCacheObj->name, pNode->data, pNode->expireTime);
__cache_wr_lock(pCacheObj);
// NOTE: once the refcount is decreased, pNode may be freed by another thread immediately.
int32_t ref = T_REF_DEC(pNode);
if (inTrashCan && (ref == 0)) {
// Remove it if the ref count is 0.
// The ref count does not need to be loaded and checked again after the lock is acquired, since it cannot be increased while
// the node is in the trashcan.
assert(pNode->pTNodeHeader->pData == pNode);
taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader);
}
__cache_unlock(pCacheObj);
}
// else {
// if (_remove) { // not in trash can, but need to remove it
// __cache_wr_lock(pCacheObj);
//
// /*
// * If not referenced by other users. Otherwise move this node to trashcan wait for all users
// * releasing this resources.
// *
// * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread
// * that tries to do the same thing.
// */
// if (ref == 0) {
// if (T_REF_VAL_GET(pNode) == 0) {
// taosCacheReleaseNode(pCacheObj, pNode);
// } else {
// taosCacheMoveToTrash(pCacheObj, pNode);
// }
// } else if (ref > 0) {
// if (!pNode->inTrashCan) {
// assert(pNode->pTNodeHeader == NULL);
// taosCacheMoveToTrash(pCacheObj, pNode);
// }
// }
//
// __cache_unlock(pCacheObj);
// }
// } // }
}
}
} }
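The reworked release path above decrements the reference count and then takes one of three actions: free the node if it was already in the trashcan and the count reached zero, move it to the trashcan if eager removal was requested while others still hold it, or free it outright when the caller held the last reference. A minimal sketch of that decision under simplified assumptions; locking and statistics are omitted, and CacheNode/cache_release are illustrative names rather than the real API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct CacheNode {
    atomic_int refCount;
    bool       inTrash;   /* set once the node has been moved to the trash list */
    void      *data;
} CacheNode;

static void cache_free_node(CacheNode *node)     { free(node); }
static void cache_move_to_trash(CacheNode *node) { node->inTrash = true; }

/* Caller gives up its reference; _remove asks for eager deletion. */
static void cache_release(CacheNode *node, bool _remove) {
    bool inTrash = node->inTrash;
    int  ref = atomic_fetch_sub(&node->refCount, 1) - 1;  /* value after the decrement */

    if (inTrash) {
        /* No new reference can be taken on a trashed node, so ref == 0 is final. */
        if (ref == 0) cache_free_node(node);
    } else if (_remove) {
        if (ref > 0) cache_move_to_trash(node);  /* others still hold it: park it   */
        else         cache_free_node(node);      /* last holder: release right away */
    }
}

int main(void) {
    CacheNode *n = calloc(1, sizeof(*n));
    atomic_init(&n->refCount, 2);   /* owner plus one reader */
    cache_release(n, true);         /* reader still holds it -> moved to trash */
    printf("in trash: %d\n", n->inTrash);
    cache_release(n, false);        /* last reference -> freed from the trash  */
    return 0;
}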
void taosCacheEmpty(SCacheObj *pCacheObj) { void taosCacheEmpty(SCacheObj *pCacheObj) {

View File

@ -29,14 +29,55 @@
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
static int random_file_fail_factor = 20; static int random_file_fail_factor = 20;
static FILE *fpRandomFileFailOutput = NULL;
void taosSetRandomFileFailFactor(int factor) void taosSetRandomFileFailFactor(int factor)
{ {
random_file_fail_factor = factor; random_file_fail_factor = factor;
} }
static void close_random_file_fail_output()
{
if (fpRandomFileFailOutput != NULL) {
if (fpRandomFileFailOutput != stdout) {
fclose(fpRandomFileFailOutput);
}
fpRandomFileFailOutput = NULL;
}
}
static void random_file_fail_output_sig(int sig)
{
fprintf(fpRandomFileFailOutput, "signal %d received.\n", sig);
struct sigaction act = {0};
act.sa_handler = SIG_DFL;
sigaction(sig, &act, NULL);
close_random_file_fail_output();
exit(EXIT_FAILURE);
}
void taosSetRandomFileFailOutput(const char *path)
{
if (path == NULL) {
fpRandomFileFailOutput = stdout;
} else if ((fpRandomFileFailOutput = fopen(path, "w")) != NULL) {
atexit(close_random_file_fail_output);
} else {
printf("failed to open random file fail log file '%s', errno=%d\n", path, errno);
return;
}
struct sigaction act = {0};
act.sa_handler = random_file_fail_output_sig;
sigaction(SIGFPE, &act, NULL);
sigaction(SIGSEGV, &act, NULL);
sigaction(SIGILL, &act, NULL);
}
#endif #endif
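The block above lets the random-file-failure injector record which fatal signal ended the process before handing the signal back to the default action. A self-contained sketch of that handler pattern with illustrative names (install_fault_log, gFaultLog); like the original, it calls stdio from a signal handler, which is acceptable only as a debugging aid.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static FILE *gFaultLog = NULL;

static void fault_sig_handler(int sig) {
    if (gFaultLog != NULL) {
        fprintf(gFaultLog, "signal %d received.\n", sig);  /* not async-signal-safe; debug use only */
        fflush(gFaultLog);
    }
    struct sigaction act = {0};
    act.sa_handler = SIG_DFL;        /* restore the default disposition */
    sigaction(sig, &act, NULL);
    exit(EXIT_FAILURE);
}

static void install_fault_log(const char *path) {
    gFaultLog = (path != NULL) ? fopen(path, "w") : stdout;
    if (gFaultLog == NULL) return;

    struct sigaction act = {0};
    act.sa_handler = fault_sig_handler;
    sigaction(SIGFPE,  &act, NULL);
    sigaction(SIGSEGV, &act, NULL);
    sigaction(SIGILL,  &act, NULL);
}

int main(void) {
    install_fault_log(NULL);         /* NULL means log to stdout */
    raise(SIGSEGV);                  /* demonstrate the handler  */
    return 0;
}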
ssize_t taos_tread(int fd, void *buf, size_t count) ssize_t taos_tread(int fd, void *buf, size_t count, const char *file, uint32_t line)
{ {
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
if (random_file_fail_factor > 0) { if (random_file_fail_factor > 0) {
@ -49,7 +90,7 @@ ssize_t taos_tread(int fd, void *buf, size_t count)
return tread(fd, buf, count); return tread(fd, buf, count);
} }
ssize_t taos_twrite(int fd, void *buf, size_t count) ssize_t taos_twrite(int fd, void *buf, size_t count, const char *file, uint32_t line)
{ {
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
if (random_file_fail_factor > 0) { if (random_file_fail_factor > 0) {
@ -62,7 +103,7 @@ ssize_t taos_twrite(int fd, void *buf, size_t count)
return twrite(fd, buf, count); return twrite(fd, buf, count);
} }
off_t taos_lseek(int fd, off_t offset, int whence) off_t taos_lseek(int fd, off_t offset, int whence, const char *file, uint32_t line)
{ {
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
if (random_file_fail_factor > 0) { if (random_file_fail_factor > 0) {

View File

@ -123,7 +123,7 @@ void taosIdPoolMarkStatus(void *handle, int id) {
int taosUpdateIdPool(id_pool_t *handle, int maxId) { int taosUpdateIdPool(id_pool_t *handle, int maxId) {
id_pool_t *pIdPool = (id_pool_t*)handle; id_pool_t *pIdPool = (id_pool_t*)handle;
if (maxId <= pIdPool->maxId) { if (maxId <= pIdPool->maxId) {
return -1; return 0;
} }
bool *idList = calloc(maxId, sizeof(bool)); bool *idList = calloc(maxId, sizeof(bool));

View File

@ -260,7 +260,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
} }
taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo)); taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
uDebug("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname); uTrace("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname);
return 0; return 0;
} }

View File

@ -59,7 +59,7 @@ int32_t vnodeInitResources() {
vnodeInitWriteFp(); vnodeInitWriteFp();
vnodeInitReadFp(); vnodeInitReadFp();
tsDnodeVnodesHash = taosHashInit(TSDB_MAX_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); tsDnodeVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true);
if (tsDnodeVnodesHash == NULL) { if (tsDnodeVnodesHash == NULL) {
vError("failed to init vnode list"); vError("failed to init vnode list");
return TSDB_CODE_VND_OUT_OF_MEMORY; return TSDB_CODE_VND_OUT_OF_MEMORY;
@ -176,16 +176,28 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
pVnode->status = TAOS_VN_STATUS_UPDATING; pVnode->status = TAOS_VN_STATUS_UPDATING;
int32_t code = vnodeSaveCfg(pVnodeCfg); int32_t code = vnodeSaveCfg(pVnodeCfg);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
code = vnodeReadCfg(pVnode); code = vnodeReadCfg(pVnode);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
code = syncReconfig(pVnode->sync, &pVnode->syncCfg); code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg); code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
pVnode->status = TAOS_VN_STATUS_READY; pVnode->status = TAOS_VN_STATUS_READY;
vDebug("vgId:%d, vnode is altered", pVnode->vgId); vDebug("vgId:%d, vnode is altered", pVnode->vgId);

View File

@ -108,6 +108,8 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo);
if (handle == NULL) { // failed to register qhandle if (handle == NULL) { // failed to register qhandle
vError("vgId:%d QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void *)pQInfo,
tstrerror(pRsp->code));
pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
qDestroyQueryInfo(pQInfo); // destroy it directly qDestroyQueryInfo(pQInfo); // destroy it directly
} else { } else {
@ -125,12 +127,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
} else { } else {
assert(pQInfo == NULL); assert(pQInfo == NULL);
} }
if (handle != NULL) { if (handle != NULL) {
vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, register qhandle and return to app", vgId, *handle);
dnodePutItemIntoReadQueue(pVnode, *handle); dnodePutItemIntoReadQueue(pVnode, *handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
} }
vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
} else { } else {
assert(pCont != NULL); assert(pCont != NULL);
handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont);
@ -138,12 +142,13 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle);
code = TSDB_CODE_QRY_INVALID_QHANDLE; code = TSDB_CODE_QRY_INVALID_QHANDLE;
} else { } else {
vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, (void*) pCont); vDebug("vgId:%d, QInfo:%p, dnode continue exec query", pVnode->vgId, (void*) pCont);
code = TSDB_CODE_VND_ACTION_IN_PROGRESS; code = TSDB_CODE_VND_ACTION_IN_PROGRESS;
qTableQuery(*handle); // do execute query qTableQuery(*handle); // do execute query
} }
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
} }
return code; return code;
} }
@ -155,7 +160,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
pRetrieve->qhandle = htobe64(pRetrieve->qhandle); pRetrieve->qhandle = htobe64(pRetrieve->qhandle);
pRetrieve->free = htons(pRetrieve->free); pRetrieve->free = htons(pRetrieve->free);
vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *(void**) pRetrieve->qhandle); vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, (void*) pRetrieve->qhandle);
memset(pRet, 0, sizeof(SRspRet)); memset(pRet, 0, sizeof(SRspRet));

View File

@ -184,6 +184,8 @@ int vnodeWriteToQueue(void *param, void *data, int type) {
memcpy(pWal, pHead, size); memcpy(pWal, pHead, size);
atomic_add_fetch_32(&pVnode->refCount, 1); atomic_add_fetch_32(&pVnode->refCount, 1);
vDebug("vgId:%d, get vnode wqueue, refCount:%d", pVnode->vgId, pVnode->refCount);
taosWriteQitem(pVnode->wqueue, type, pWal); taosWriteQitem(pVnode->wqueue, type, pWal);
return 0; return 0;

View File

@ -12,3 +12,4 @@ SET(CMAKE_VERBOSE_MAKEFILE ON)
ADD_SUBDIRECTORY(examples/c) ADD_SUBDIRECTORY(examples/c)
ADD_SUBDIRECTORY(tsim) ADD_SUBDIRECTORY(tsim)
ADD_SUBDIRECTORY(test/c) ADD_SUBDIRECTORY(test/c)
ADD_SUBDIRECTORY(comparisonTest/tdengine)

View File

@ -0,0 +1,12 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc)
IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
add_executable(tdengineTest tdengineTest.c)
target_link_libraries(tdengineTest taos_static tutil common pthread)
ENDIF()

View File

@ -7,6 +7,7 @@
#include <time.h> #include <time.h>
#include <pthread.h> #include <pthread.h>
#include <sys/time.h> #include <sys/time.h>
#include <inttypes.h>
typedef struct { typedef struct {
char sql[256]; char sql[256];
@ -123,19 +124,21 @@ void writeDataImp(void *param) {
if (taos == NULL) if (taos == NULL)
taos_error(taos); taos_error(taos);
int code = taos_query(taos, "use db"); TAOS_RES* result = taos_query(taos, "use db");
int32_t code = taos_errno(result);
if (code != 0) { if (code != 0) {
taos_error(taos); taos_error(taos);
} }
taos_free_result(result);
char sql[65000]; char *sql = calloc(1, 8*1024*1024);
int sqlLen = 0; int sqlLen = 0;
int lastMachineid = 0; int lastMachineid = 0;
int counter = 0; int counter = 0;
int totalRecords = 0; int totalRecords = 0;
for (int j = pThread->sID; j <= pThread->eID; j++) { for (int j = pThread->sID; j <= pThread->eID; j++) {
char fileName[256]; char fileName[300];
sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, j); sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, j);
FILE *fp = fopen(fileName, "r"); FILE *fp = fopen(fileName, "r");
@ -162,7 +165,7 @@ void writeDataImp(void *param) {
int64_t timestamp; int64_t timestamp;
int temperature; int temperature;
float humidity; float humidity;
sscanf(line, "%d%s%d%lld%d%f", &machineid, machinename, &machinegroup, &timestamp, &temperature, &humidity); sscanf(line, "%d%s%d%" PRId64 "%d%f", &machineid, machinename, &machinegroup, &timestamp, &temperature, &humidity);
if (counter == 0) { if (counter == 0) {
sqlLen = sprintf(sql, "insert into"); sqlLen = sprintf(sql, "insert into");
@ -174,14 +177,16 @@ void writeDataImp(void *param) {
machineid, machineid, machinename, machinegroup); machineid, machineid, machinename, machinegroup);
} }
sqlLen += sprintf(sql + sqlLen, "(%lld,%d,%f)", timestamp, temperature, humidity); sqlLen += sprintf(sql + sqlLen, "(%" PRId64 ",%d,%f)", timestamp, temperature, humidity);
counter++; counter++;
if (counter >= arguments.rowsPerRequest) { if (counter >= arguments.rowsPerRequest) {
int code = taos_query(taos, sql); TAOS_RES *result = taos_query(taos, sql);
int32_t code = taos_errno(result);
if (code != 0) { if (code != 0) {
printf("thread:%d error:%d reason:%s\n", pThread->pid, code, taos_errstr(taos)); printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
} }
taos_free_result(result);
totalRecords += counter; totalRecords += counter;
counter = 0; counter = 0;
@ -194,15 +199,18 @@ void writeDataImp(void *param) {
} }
if (counter > 0) { if (counter > 0) {
int code = taos_query(taos, sql); TAOS_RES *result = taos_query(taos, sql);
int32_t code = taos_errno(result);
if (code != 0) { if (code != 0) {
printf("thread:%d error:%d reason:%s\n", pThread->pid, code, taos_errstr(taos)); printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
} }
taos_free_result(result);
totalRecords += counter; totalRecords += counter;
} }
__sync_fetch_and_add(&statis.totalRows, totalRecords); __sync_fetch_and_add(&statis.totalRows, totalRecords);
free(sql);
} }
void writeData() { void writeData() {
@ -215,19 +223,23 @@ void writeData() {
taos_init(); taos_init();
void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
if (taos == NULL) if (taos == NULL) taos_error(taos);
taos_error(taos);
int code = taos_query(taos, "create database if not exists db"); TAOS_RES *result = taos_query(taos, "create database if not exists db");
int32_t code = taos_errno(result);
if (code != 0) { if (code != 0) {
taos_error(taos); taos_error(taos);
} }
taos_free_result(result);
code = taos_query(taos, "create table if not exists db.devices(ts timestamp, temperature int, humidity float) " result = taos_query(taos,
"create table if not exists db.devices(ts timestamp, temperature int, humidity float) "
"tags(devid int, devname binary(16), devgroup int)"); "tags(devid int, devname binary(16), devgroup int)");
code = taos_errno(result);
if (code != 0) { if (code != 0) {
taos_error(taos); taos_error(taos);
} }
taos_free_result(result);
int64_t st = getTimeStampMs(); int64_t st = getTimeStampMs();
@ -292,17 +304,12 @@ void readData() {
int64_t st = getTimeStampMs(); int64_t st = getTimeStampMs();
int code = taos_query(taos, line); TAOS_RES *result = taos_query(taos, line);
int32_t code = taos_errno(result);
if (code != 0) { if (code != 0) {
taos_error(taos); taos_error(taos);
} }
void *result = taos_use_result(taos);
if (result == NULL) {
printf("failed to get result, reason:%s\n", taos_errstr(taos));
exit(1);
}
TAOS_ROW row; TAOS_ROW row;
int rows = 0; int rows = 0;
//int num_fields = taos_field_count(taos); //int num_fields = taos_field_count(taos);
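The test changes above port the tool from the old int-returning taos_query plus taos_use_result to the current client API, where taos_query returns a TAOS_RES* that is checked with taos_errno and always released with taos_free_result. A short usage sketch of that pattern; it assumes a TDengine server reachable at 127.0.0.1 with the default credentials used in the test.

#include <stdio.h>
#include <taos.h>

static int run_sql(TAOS *taos, const char *sql) {
    TAOS_RES *result = taos_query(taos, sql);
    int code = taos_errno(result);
    if (code != 0) {
        printf("sql failed:%s reason:%s\n", sql, taos_errstr(taos));
    }
    taos_free_result(result);   /* always release, on success and on failure */
    return code;
}

int main(void) {
    taos_init();
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
    if (taos == NULL) {
        printf("failed to connect\n");
        return 1;
    }

    run_sql(taos, "create database if not exists db");
    run_sql(taos, "use db");

    taos_close(taos);
    return 0;
}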

View File

@ -28,8 +28,11 @@ class TDSimClient:
"locale": "en_US.UTF-8", "locale": "en_US.UTF-8",
"charset": "UTF-8", "charset": "UTF-8",
"asyncLog": "0", "asyncLog": "0",
"anyIp": "0", "minTablesPerVnode": "4",
"sdbDebugFlag": "135", "maxTablesPerVnode": "1000",
"tableIncStepPerVnode": "10000",
"maxVgroupsPerDb": "1000",
"sdbDebugFlag": "143",
"rpcDebugFlag": "135", "rpcDebugFlag": "135",
"tmrDebugFlag": "131", "tmrDebugFlag": "131",
"cDebugFlag": "135", "cDebugFlag": "135",
@ -37,7 +40,6 @@ class TDSimClient:
"jnidebugFlag": "135", "jnidebugFlag": "135",
"qdebugFlag": "135", "qdebugFlag": "135",
} }
def init(self, path): def init(self, path):
self.__init__() self.__init__()
self.path = path self.path = path

View File

@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -10,7 +10,7 @@ sleep 3000
sql connect sql connect
print ============================ dnode1 start print ============================ dnode1 start
sql create database db maxTables 500 cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1 sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql show databases sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data00 != db then if $data00 != db then
@ -31,13 +31,10 @@ endi
if $data06 != 20,20,20 then if $data06 != 20,20,20 then
return -1 return -1
endi endi
if $data07 != 500 then if $data07 != 2 then
return -1 return -1
endi endi
if $data08 != 2 then if $data08 != 4 then
return -1
endi
if $data09 != 4 then
return -1 return -1
endi endi
@ -46,7 +43,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
return return
sql_error alter database db cache 256 sql_error alter database db cache 256
sql_error alter database db blocks 1 sql_error alter database db blocks 1
sql_error alter database db maxTables 10
sql_error alter database db days 10 sql_error alter database db days 10
sql_error alter database db keep 10 sql_error alter database db keep 10
sql_error alter database db minRows 350 sql_error alter database db minRows 350
@ -59,7 +55,6 @@ sql_error alter database db replica 2
print ============== step3 print ============== step3
sql alter database db maxTables 1000
sql alter database db comp 1 sql alter database db comp 1
sql alter database db blocks 40 sql alter database db blocks 40
sql alter database db keep 30 sql alter database db keep 30

View File

@ -1,11 +1,13 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 5
system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode2 -c wallevel -v 2 system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 2 system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 5
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
@ -17,7 +19,7 @@ sleep 1000
print ============================ step1 print ============================ step1
sql create database db maxTables 5 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t000 using db.st tags(0) sql create table db.t000 using db.st tags(0)
sql create table db.t001 using db.st tags(1) sql create table db.t001 using db.st tags(1)
@ -74,9 +76,14 @@ if $rows != 20 then
endi endi
print ============================ step3 print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 10 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 10
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t100 using db.st tags(0) sql create table db.t100 using db.st tags(0)
sql create table db.t101 using db.st tags(1) sql create table db.t101 using db.st tags(1)
@ -133,9 +140,14 @@ if $rows != 40 then
endi endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 15 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 15
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 15
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t200 using db.st tags(0) sql create table db.t200 using db.st tags(0)
sql create table db.t201 using db.st tags(1) sql create table db.t201 using db.st tags(1)
@ -252,9 +264,14 @@ if $rows != 60 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 20 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t300 using db.st tags(0) sql create table db.t300 using db.st tags(0)
sql create table db.t301 using db.st tags(1) sql create table db.t301 using db.st tags(1)
@ -380,9 +397,14 @@ if $rows != 80 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 25 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 25
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 25
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t400 using db.st tags(0) sql create table db.t400 using db.st tags(0)
sql create table db.t401 using db.st tags(1) sql create table db.t401 using db.st tags(1)

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -11,7 +11,7 @@ sql connect
print ============================ step1 print ============================ step1
sql create database db maxTables 10 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t0 using db.st tags(0) sql create table db.t0 using db.st tags(0)
sql create table db.t1 using db.st tags(1) sql create table db.t1 using db.st tags(1)
@ -49,8 +49,11 @@ endi
print ============================ step3 print ============================ step3
sql alter database db maxTables 20 system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t10 using db.st tags(0) sql create table db.t10 using db.st tags(0)
sql create table db.t11 using db.st tags(1) sql create table db.t11 using db.st tags(1)
@ -86,9 +89,11 @@ if $rows != 20 then
endi endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 30 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 30
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t20 using db.st tags(0) sql create table db.t20 using db.st tags(0)
sql create table db.t21 using db.st tags(1) sql create table db.t21 using db.st tags(1)
@ -183,9 +188,11 @@ if $rows != 30 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 40 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 40
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t30 using db.st tags(0) sql create table db.t30 using db.st tags(0)
sql create table db.t31 using db.st tags(1) sql create table db.t31 using db.st tags(1)
@ -285,9 +292,11 @@ if $rows != 40 then
endi endi
print ============================ step12 print ============================ step12
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 50 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 50
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t40 using db.st tags(0) sql create table db.t40 using db.st tags(0)
sql create table db.t41 using db.st tags(1) sql create table db.t41 using db.st tags(1)

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 5
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -11,7 +11,7 @@ sql connect
print ============================ step1 print ============================ step1
sql create database db maxTables 5 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t000 using db.st tags(0) sql create table db.t000 using db.st tags(0)
sql create table db.t001 using db.st tags(1) sql create table db.t001 using db.st tags(1)
@ -68,9 +68,11 @@ if $rows != 20 then
endi endi
print ============================ step3 print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 10 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t100 using db.st tags(0) sql create table db.t100 using db.st tags(0)
sql create table db.t101 using db.st tags(1) sql create table db.t101 using db.st tags(1)
@ -127,9 +129,11 @@ if $rows != 40 then
endi endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 15 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 15
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t200 using db.st tags(0) sql create table db.t200 using db.st tags(0)
sql create table db.t201 using db.st tags(1) sql create table db.t201 using db.st tags(1)
@ -244,9 +248,11 @@ if $rows != 60 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 20 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t300 using db.st tags(0) sql create table db.t300 using db.st tags(0)
sql create table db.t301 using db.st tags(1) sql create table db.t301 using db.st tags(1)
@ -370,10 +376,11 @@ if $rows != 80 then
endi endi
print ============================ step12 print ============================ step12
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 25 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 25
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t400 using db.st tags(0) sql create table db.t400 using db.st tags(0)
sql create table db.t401 using db.st tags(1) sql create table db.t401 using db.st tags(1)
sql create table db.t402 using db.st tags(2) sql create table db.t402 using db.st tags(2)

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -11,7 +11,7 @@ sql connect
print ============================ step1 print ============================ step1
sql create database db maxTables 20 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t000 using db.st tags(0) sql create table db.t000 using db.st tags(0)
sql create table db.t001 using db.st tags(1) sql create table db.t001 using db.st tags(1)
@ -69,7 +69,7 @@ endi
print ============================ step3 print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 2
sleep 5000 sleep 5000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 5000 sleep 5000
@ -131,7 +131,7 @@ endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 3 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
sleep 5000 sleep 5000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 5000 sleep 5000

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -17,7 +17,7 @@ $db = $dbPrefix . $i
$tb = $tbPrefix . $i $tb = $tbPrefix . $i
print =============== step1 print =============== step1
sql create database $db replica 1 days 20 keep 2000 sql create database $db replica 1 days 20 keep 2000 cache 16
sql show databases sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data00 != $db then if $data00 != $db then
@ -35,7 +35,7 @@ endi
if $data05 != 20 then if $data05 != 20 then
return -1 return -1
endi endi
if $data07 != 1000 then if $data07 != 16 then
return -1 return -1
endi endi
@ -76,13 +76,6 @@ if $data05 != 15 then
return -1 return -1
endi endi
#if $data06 != 1500,15000,1500 then
# return -1
#endi
if $data07 != 1000 then
return -1
endi
print =============== step6 print =============== step6
sql use $db sql use $db
sql create table $tb (ts timestamp, speed int) sql create table $tb (ts timestamp, speed int)

View File

@ -2,6 +2,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 10
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
print ========= start dnodes print ========= start dnodes
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -9,7 +11,7 @@ sleep 3000
sql connect sql connect
print ======== step1 print ======== step1
sql create database db blocks 2 maxtables 1000 sql create database db blocks 2
sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int) sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int)
$tbPrefix = db.t $tbPrefix = db.t
@ -21,7 +23,7 @@ while $i < 2000
endw endw
sql show db.vgroups sql show db.vgroups
if $rows != 2 then if $rows != 10 then
return -1 return -1
endi endi

View File

@ -2,15 +2,15 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
print =============== step2 print =============== step2
sql create database db maxtables 4 sql create database db
sql show databases sql show databases
print $rows $data07 print $rows $data07
@ -18,10 +18,6 @@ if $rows != 1 then
return -1 return -1
endi endi
if $data07 != 4 then
return -1
endi
print =============== step3 print =============== step3
sql use db sql use db
sql create table t1 (ts timestamp, i int) sql create table t1 (ts timestamp, i int)
@ -78,7 +74,7 @@ sql reset query cache
sleep 4000 sleep 4000
print =============== step7 print =============== step7
sql create database db maxtables 4 sql create database db
sql show databases sql show databases
print $rows $data07 print $rows $data07
@ -86,10 +82,6 @@ if $rows != 1 then
return -1 return -1
endi endi
if $data07 != 4 then
return -1
endi
print =============== step8 print =============== step8
sql use db sql use db
sql create table t1 (ts timestamp, i int) sql create table t1 (ts timestamp, i int)

View File

@ -6,7 +6,8 @@ $totalRows = $totalVnodes * $maxTables
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v $maxTables
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v $totalVnodes
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes
system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000 system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000
system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000 system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000
@ -17,7 +18,7 @@ print ========== prepare data
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
sql create database db blocks 2 cache 1 maxTables $maxTables sql create database db blocks 2 cache 1
sql use db sql use db
print ========== step1 print ========== step1

View File

@ -81,7 +81,7 @@ print =============== step2 - no db
#11 #11
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/sql system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/sql
print 11-> $system_content print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","maxtables","cache(MB)","blocks","minrows","maxrows","ctime(Sec.)","wallevel","comp","precision","status"],"data":[],"rows":0}@ then if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","comp","precision","status"],"data":[],"rows":0}@ then
return -1 return -1
endi endi

View File

@ -128,12 +128,12 @@ endi
if $data06 != 365,365,365 then if $data06 != 365,365,365 then
return -1 return -1
endi endi
print data08 = $data08 print data07 = $data07
if $data08 != $cache then if $data07 != $cache then
print expect $cache, actual:$data08 print expect $cache, actual:$data07
return -1 return -1
endi endi
if $data09 != 4 then if $data08 != 4 then
return -1 return -1
endi endi

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -63,6 +63,11 @@ if $data41 != 9 then
endi endi
sql select * from $stb order by ts asc limit 5 sql select * from $stb order by ts asc limit 5
print select * from $stb order by ts asc limit 5
print $data00 $data01
print $data10 $data11
print $data20 $data21
if $rows != 5 then if $rows != 5 then
return -1 return -1
endi endi

View File

@ -356,6 +356,11 @@ if $rows != 0 then
return -1 return -1
endi endi
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01
print $data10 $data11
print $data20 $data21
if $rows != 3 then if $rows != 3 then
return -1 return -1
endi endi

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -4,7 +4,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000

View File

@ -4,7 +4,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000

View File

@ -3,6 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 129 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 129
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 8
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
@ -36,7 +37,7 @@ while $x > $y
endw endw
sql show vgroups sql show vgroups
if $rows != 2 then if $rows != 8 then
return -1 return -1
endi endi
@ -51,7 +52,7 @@ while $x > $y
endw endw
sql show vgroups sql show vgroups
if $rows != 4 then if $rows != 8 then
return -1 return -1
endi endi
@ -66,7 +67,7 @@ while $x > $y
endw endw
sql show vgroups sql show vgroups
if $rows != 6 then if $rows != 8 then
return -1 return -1
endi endi

View File

@ -312,9 +312,9 @@ cd ../../../debug; make
#./test.sh -f general/parser/stream_on_sys.sim #./test.sh -f general/parser/stream_on_sys.sim
#./test.sh -f general/parser/repeatStream.sim #./test.sh -f general/parser/repeatStream.sim
./test.sh -f general/stream/metrics_del.sim #./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim ./test.sh -f general/stream/metrics_n.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim #./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim ./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_3.sim ./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim ./test.sh -f general/stream/stream_restart.sim
@ -324,7 +324,7 @@ cd ../../../debug; make
./test.sh -f general/stream/table_replica1_vnoden.sim ./test.sh -f general/stream/table_replica1_vnoden.sim
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim ./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim #./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim ./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim ./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim ./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
@ -358,6 +358,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim ./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim ./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim ./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim
./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim ./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim ./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim
./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim ./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim

View File

@ -110,7 +110,7 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG
echo "serverPort ${NODE}" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG
echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG
echo "logDir $LOG_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG
echo "debugFlag 135" >> $TAOS_CFG echo "debugFlag 131" >> $TAOS_CFG
echo "mDebugFlag 135" >> $TAOS_CFG echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 135" >> $TAOS_CFG echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "dDebugFlag 135" >> $TAOS_CFG echo "dDebugFlag 135" >> $TAOS_CFG
@ -138,6 +138,10 @@ echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG echo "statusInterval 1" >> $TAOS_CFG
echo "numOfTotalVnodes 4" >> $TAOS_CFG echo "numOfTotalVnodes 4" >> $TAOS_CFG
echo "maxVgroupsPerDb 4" >> $TAOS_CFG
echo "minTablesPerVnode 4" >> $TAOS_CFG
echo "maxTablesPerVnode 1000" >> $TAOS_CFG
echo "tableIncStepPerVnode 10000" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG
echo "numOfMnodes 1" >> $TAOS_CFG echo "numOfMnodes 1" >> $TAOS_CFG
echo "locale en_US.UTF-8" >> $TAOS_CFG echo "locale en_US.UTF-8" >> $TAOS_CFG

View File

@ -25,6 +25,17 @@ system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20 system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 20 system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 2
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 2
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 2
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 2
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 100000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 100000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 100000
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 100000
system sh/cfg.sh -n dnode1 -c http -v 1 system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 1 system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1 system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode4 -c http -v 1

View File

@ -161,7 +161,7 @@ sleep 10000
$loopCnt = 0 $loopCnt = 0
wait_dnode_offline_overtime_dropped: wait_dnode_offline_overtime_dropped:
$loopCnt = $loopCnt + 1 $loopCnt = $loopCnt + 1
if $loopCnt == 10 then if $loopCnt == 20 then
return -1 return -1
endi endi
sql show dnodes sql show dnodes

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -56,11 +62,10 @@ sql create dnode $hostname3
sql create dnode $hostname4 sql create dnode $hostname4
sleep 3000 sleep 3000
$totalTableNum = 10
$sleepTimer = 3000 $sleepTimer = 3000
$db = db $db = db
sql create database $db replica 3 maxTables $totalTableNum sql create database $db replica 3
sql use $db sql use $db
# create table , insert data # create table , insert data

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -54,11 +60,10 @@ sql create dnode $hostname2
sql create dnode $hostname3 sql create dnode $hostname3
sleep 3000 sleep 3000
$totalTableNum = 10
$sleepTimer = 3000 $sleepTimer = 3000
$db = db $db = db
sql create database $db replica 2 maxTables $totalTableNum sql create database $db replica 2
sql use $db sql use $db
# create table , insert data # create table , insert data
@ -137,8 +142,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2 $dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2 $dnode2Vtatus = $data9_2
if $dnode3Vtatus != offline then if $dnode3Vtatus != offline then
sleep 2000 sleep 2000
@ -204,8 +209,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2 $dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2 $dnode2Vtatus = $data9_2
print dnode2Vtatus: $dnode3Vtatus print dnode2Vtatus: $dnode3Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus
@ -319,8 +324,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2 $dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2 $dnode2Vtatus = $data9_2
print dnode4Vtatus: $dnode4Vtatus print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -56,11 +62,10 @@ sql create dnode $hostname3
 sql create dnode $hostname4
 sleep 3000
-$totalTableNum = 10
 $sleepTimer = 3000
 $db = db
-sql create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3
 sql use $db
 # create table , insert data
@@ -139,8 +144,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 if $dnode4Vtatus != offline then
 sleep 2000
@@ -206,8 +211,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus
@@ -325,8 +330,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus
@@ -386,8 +391,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus

View File

@@ -23,6 +23,12 @@ system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
 system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
 system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 16
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 16
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 16
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 16
+system sh/cfg.sh -n dnode5 -c maxVgroupsPerDb -v 16
 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
 system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
@@ -215,6 +221,7 @@ system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
 system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 16
 system sh/exec.sh -n dnode3 -s start
 sql create dnode $hostname3
@@ -322,7 +329,7 @@ $totalRows = 0
 $tsStart = 1420041600000
 $db = db1
-sql create database $db replica 2 maxTables 4
+sql create database $db replica 2
 sql use $db
 $stb = stb
 sql create table $stb (ts timestamp, c1 int) tags(t1 int)

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -52,11 +58,10 @@ system sh/exec.sh -n dnode2 -s start
 sql create dnode $hostname2
 sleep 3000
-$totalTableNum = 10
 $sleepTimer = 10000
 $db = db
-sql create database $db replica 1 maxTables $totalTableNum
+sql create database $db replica 1
 sql use $db
 # create table , insert data
@@ -352,16 +357,16 @@ if $loopCnt == 10 then
 endi
 sql show vgroups
-print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1
-print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2
-print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3
-print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 $data10_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 $data10_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 $data10_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4 $data10_4
 #print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
 #print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
-$thirdDnode_2 = $data8_1
-$thirdDnode_3 = $data8_2
-$thirdDnode_4 = $data8_3
-$thirdDnode_5 = $data8_4
+$thirdDnode_2 = $data10_1
+$thirdDnode_3 = $data10_2
+$thirdDnode_4 = $data10_3
+$thirdDnode_5 = $data10_4
 if $thirdDnode_2 != null then
 sleep 2000
@@ -405,10 +410,10 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
 print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
 #print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
 #print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
-$sencodDnode_2 = $data5_1
-$sencodDnode_3 = $data5_2
-$sencodDnode_4 = $data5_3
-$sencodDnode_5 = $data5_4
+$sencodDnode_2 = $data7_1
+$sencodDnode_3 = $data7_2
+$sencodDnode_4 = $data7_3
+$sencodDnode_5 = $data7_4
 if $sencodDnode_2 != null then
 sleep 2000

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -60,7 +66,7 @@ $totalTableNum = 10
 $sleepTimer = 10000
 $db = db
-sql create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2
 sql use $db
 # create table , insert data

View File

@@ -39,10 +39,10 @@ system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
-system sh/cfg.sh -n dnode1 -c offlineThreshold -v 10
-system sh/cfg.sh -n dnode2 -c offlineThreshold -v 10
-system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
-system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode1 -c offlineThreshold -v 5
+system sh/cfg.sh -n dnode2 -c offlineThreshold -v 5
+system sh/cfg.sh -n dnode3 -c offlineThreshold -v 5
+system sh/cfg.sh -n dnode4 -c offlineThreshold -v 5
 system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1
 system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1
@@ -182,7 +182,7 @@ system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
 system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
-system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode4 -c offlineThreshold -v 5
 system sh/cfg.sh -n dnode4 -c enableCoreFile -v 1
 system sh/exec.sh -n dnode4 -s start

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -137,8 +143,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 if $dnode4Vtatus != offline then
 sleep 2000
@@ -212,8 +218,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus
@@ -286,8 +292,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -60,7 +66,7 @@ $totalTableNum = 10
 $sleepTimer = 3000
 $db = db
-sql create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2
 sql use $db
 # create table , insert data
@@ -143,8 +149,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode3Vtatus = $data4_2
-$dnode2Vtatus = $data7_2
+$dnode3Vtatus = $data6_2
+$dnode2Vtatus = $data9_2
 if $dnode3Vtatus != offline then
 sleep 2000
@@ -233,8 +239,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != master then
 sleep 2000
@@ -312,8 +318,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != offline then
 sleep 2000
@@ -392,8 +398,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus == offline then
 sleep 2000

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -195,8 +201,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != offline then
 sleep 2000
@@ -268,8 +274,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != offline then
 sleep 2000
@@ -324,8 +330,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != slave then
 sleep 2000

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -60,7 +66,7 @@ $totalTableNum = 10
 $sleepTimer = 3000
 $db = db
-sql create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2
 sql use $db
 # create table , insert data
@@ -157,8 +163,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != offline then
 sleep 2000
@@ -230,8 +236,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != offline then
 sleep 2000
@@ -286,8 +292,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != slave then
 sleep 2000

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -56,11 +62,10 @@ sql create dnode $hostname3
 sql create dnode $hostname4
 sleep 3000
-$totalTableNum = 10
 $sleepTimer = 3000
 $db = db
-sql create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3
 sql use $db
 # create table , insert data
@@ -141,8 +146,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 if $dnode4Vtatus != offline then
 sleep 2000
@@ -201,8 +206,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus
@@ -301,8 +306,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus
@@ -417,8 +422,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 print dnode4Vtatus: $dnode4Vtatus
 print dnode3Vtatus: $dnode3Vtatus

View File

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@@ -60,7 +66,7 @@ $totalTableNum = 10
 $sleepTimer = 3000
 $db = db
-sql create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2
 sql use $db
 # create table , insert data
@@ -143,8 +149,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode3Vtatus = $data4_2
-$dnode2Vtatus = $data7_2
+$dnode3Vtatus = $data6_2
+$dnode2Vtatus = $data9_2
 if $dnode3Vtatus != offline then
 sleep 2000
@@ -233,8 +239,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != master then
 sleep 2000
@@ -312,8 +318,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus != offline then
 sleep 2000
@@ -391,8 +397,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data7_2
-$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data9_2
+$dnode3Vtatus = $data6_2
 if $dnode2Vtatus == offline then
 sleep 2000

View File

@@ -34,6 +34,17 @@ system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
 system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+$totalTableNum = 10
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 20
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 20
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 20
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 20
+system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
+system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
 system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
 system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
@@ -56,11 +67,10 @@ sql create dnode $hostname3
 sql create dnode $hostname4
 sleep 3000
-$totalTableNum = 10
 $sleepTimer = 3000
 $db = db
-sql create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3
 sql use $db
 # create table , insert data
@@ -146,8 +156,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode4Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode4Vtatus = $data6_2
+$dnode3Vtatus = $data9_2
 if $dnode4Vtatus != offline then
 sleep 2000
@@ -169,7 +179,7 @@ $totalTableNum = 10
 $sleepTimer = 3000
 $db = db1
-sql create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2
 sql use $db
 # create table , insert data
@@ -254,8 +264,8 @@ print show vgroups:
 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode3Vtatus = $data4_3
-$dnode2Vtatus = $data7_3
+$dnode3Vtatus = $data6_3
+$dnode2Vtatus = $data9_3
 if $dnode3Vtatus != offline then
 sleep 2000

Some files were not shown because too many files have changed in this diff.