Merge branch 'develop' into feature/2.0tsdb

Hongze Cheng committed 482476e409 on 2020-07-12 22:51:52 +08:00
141 changed files with 3461 additions and 1932 deletions

View File

@@ -13,7 +13,7 @@ branches:
 matrix:
   - os: linux
-    dist: bionic
+    dist: focal
     language: c
     git:
@@ -28,8 +28,6 @@ matrix:
           - build-essential
          - cmake
          - net-tools
-          - python-pip
-          - python-setuptools
          - python3-pip
          - python3-setuptools
          - valgrind
@@ -54,13 +52,19 @@ matrix:
         cd ${TRAVIS_BUILD_DIR}/debug
         make install > /dev/null || travis_terminate $?
-        pip install numpy
-        pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
         pip3 install numpy
         pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
         cd ${TRAVIS_BUILD_DIR}/tests
         ./test-all.sh smoke || travis_terminate $?
+        sleep 1
+        cd ${TRAVIS_BUILD_DIR}/tests/pytest
+        pkill -TERM -x taosd
+        fuser -k -n tcp 6030
+        sleep 1
+        ./crash_gen.sh -a -p -t 4 -s 25|| travis_terminate $?
+        sleep 1
         cd ${TRAVIS_BUILD_DIR}/tests/pytest
         ./valgrind-test.sh 2>&1 > mem-error-out.log

View File

@@ -160,7 +160,9 @@ void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
 int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
 void tscFieldInfoClear(SFieldInfo* pFieldInfo);
 
-int32_t tscNumOfFields(SQueryInfo* pQueryInfo);
+static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
 
 int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
 void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);

View File

@@ -412,7 +412,44 @@ char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
 int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
 int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
 
-void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column);
+//void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column);
+
+static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
+  SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex);
+  assert(pInfo->pSqlExpr != NULL);
+
+  int32_t type = pInfo->pSqlExpr->resType;
+  int32_t bytes = pInfo->pSqlExpr->resBytes;
+
+  char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
+
+  if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
+    int32_t realLen = varDataLen(pData);
+    assert(realLen <= bytes - VARSTR_HEADER_SIZE);
+
+    if (isNull(pData, type)) {
+      pRes->tsrow[columnIndex] = NULL;
+    } else {
+      pRes->tsrow[columnIndex] = ((tstr*)pData)->data;
+    }
+
+    if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) {  // todo refactor
+      *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
+    }
+
+    pRes->length[columnIndex] = realLen;
+  } else {
+    assert(bytes == tDataTypeDesc[type].nSize);
+
+    if (isNull(pData, type)) {
+      pRes->tsrow[columnIndex] = NULL;
+    } else {
+      pRes->tsrow[columnIndex] = pData;
+    }
+
+    pRes->length[columnIndex] = bytes;
+  }
+}
+
 extern void * tscCacheHandle;
 extern void * tscTmr;
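For context on the inlined tscGetResultColumnChr above: BINARY and NCHAR cells are var-length values whose payload is preceded by a small length header, which is what varDataLen() reads and VARSTR_HEADER_SIZE skips over. Below is a minimal stand-alone sketch of that layout, assuming a 2-byte length prefix; the struct name and the exact header size are illustrative assumptions, not taken from this diff.

#include <assert.h>
#include <stdint.h>

/* Hypothetical var-string cell: a length prefix followed by the payload,
 * mirroring what varDataLen()/VARSTR_HEADER_SIZE imply in the hunk above. */
typedef struct {
  uint16_t len;     /* actual payload length */
  char     data[];  /* payload bytes */
} var_cell_t;

static const char *cell_payload(const char *cell, int32_t resBytes, int32_t *realLen) {
  const var_cell_t *v = (const var_cell_t *)cell;
  *realLen = v->len;
  assert(*realLen <= resBytes - (int32_t)sizeof(uint16_t));  /* same bound the assert enforces */
  return v->data;
}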

View File

@@ -43,7 +43,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
   pSql->signature = pSql;
   pSql->param = param;
   pSql->pTscObj = pObj;
-  pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
+  pSql->maxRetry = TSDB_MAX_REPLICA;
   pSql->fp = fp;
   pSql->sqlstr = calloc(1, sqlLen + 1);

View File

@@ -2953,9 +2953,13 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
   assert(pCtx->inputBytes == pCtx->outputBytes);
 
-  for (int32_t i = 0; i < pCtx->size; ++i) {
-    tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true);
+  tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true);
+  char* data = pCtx->aOutputBuf;
+  pCtx->aOutputBuf += pCtx->outputBytes;
+
+  // directly copy from the first one
+  for (int32_t i = 1; i < pCtx->size; ++i) {
+    memmove(pCtx->aOutputBuf, data, pCtx->outputBytes);
     pCtx->aOutputBuf += pCtx->outputBytes;
   }
 }
@@ -3941,7 +3945,7 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
   tsBufFlush(pTSbuf);
   strcpy(pCtx->aOutputBuf, pTSbuf->path);
 
-  tsBufDestory(pTSbuf);
+  tsBufDestroy(pTSbuf);
   doFinalizer(pCtx);
 }
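The tag_project_function change above serializes the tag value once and then replicates that first cell into the remaining output slots with memmove, instead of re-running tVariantDump for every row. A small stand-alone sketch of the same fill-by-copy idea; the function and parameter names are illustrative, not the engine's internals.

#include <string.h>

/* Fill `rows` fixed-width cells by copying the already-encoded first cell,
 * the same pattern the hunk above applies to projected tag columns. */
static void fill_by_copy(char *out, size_t cellBytes, int rows) {
  char *first = out;  /* cell 0 is assumed to be encoded already */
  for (int i = 1; i < rows; ++i) {
    memmove(out + (size_t)i * cellBytes, first, cellBytes);
  }
}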

View File

@@ -274,6 +274,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   pReducer->numOfBuffer = idx;
 
   SCompareParam *param = malloc(sizeof(SCompareParam));
+  if (param == NULL) {
+    tfree(pReducer);
+    return;
+  }
+
   param->pLocalData = pReducer->pLocalDataSrc;
   param->pDesc = pReducer->pDesc;
   param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
@@ -284,6 +288,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
   if (pReducer->pLoserTree == NULL || pRes->code != 0) {
+    tfree(param);
     tfree(pReducer);
     return;
   }
@@ -332,6 +337,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
     tfree(pReducer->pResultBuf);
     tfree(pReducer->pFinalRes);
     tfree(pReducer->prevRowOfInput);
+    tfree(pReducer->pLoserTree);
+    tfree(param);
     tfree(pReducer);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return;
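These hunks follow the usual rule for multi-step allocation: when a later step fails, release everything acquired so far before returning. A compact sketch of that pattern with hypothetical names (this is not the tscCreateLocalReducer internals, just the shape of the cleanup):

#include <stdlib.h>

typedef struct { void *a, *b; } ctx_t;

/* Allocate two resources; on any failure, free what was already obtained. */
static ctx_t *ctx_create(size_t na, size_t nb) {
  ctx_t *c = calloc(1, sizeof(*c));
  if (c == NULL) return NULL;

  c->a = malloc(na);
  if (c->a == NULL) { free(c); return NULL; }

  c->b = malloc(nb);
  if (c->b == NULL) { free(c->a); free(c); return NULL; }  /* same order the hunks use */
  return c;
}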

View File

@@ -497,7 +497,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
   tsem_init(&pSql->rspSem, 0, 0);
   pSql->signature = pSql;
   pSql->pTscObj = pObj;
-  pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
+  pSql->maxRetry = TSDB_MAX_REPLICA;
   pStmt->pSql = pSql;
 
   return pStmt;

View File

@@ -175,7 +175,6 @@ static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) {
   return TSDB_CODE_SUCCESS;
 }
 
-// todo handle memory leak in error handle function
 int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) {
     return TSDB_CODE_TSC_APP_ERROR;
@@ -5490,9 +5489,9 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate) {
   }
 
   if (pCreate->replications != -1 &&
-      (pCreate->replications < TSDB_MIN_REPLICA_NUM || pCreate->replications > TSDB_MAX_REPLICA_NUM)) {
+      (pCreate->replications < TSDB_MIN_DB_REPLICA_OPTION || pCreate->replications > TSDB_MAX_DB_REPLICA_OPTION)) {
     snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications,
-             TSDB_MIN_REPLICA_NUM, TSDB_MAX_REPLICA_NUM);
+             TSDB_MIN_DB_REPLICA_OPTION, TSDB_MAX_DB_REPLICA_OPTION);
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }

View File

@@ -113,7 +113,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
   pSql->pTscObj = pObj;
   pSql->signature = pSql;
-  pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
+  pSql->maxRetry = TSDB_MAX_REPLICA;
   tsem_init(&pSql->rspSem, 0, 0);
 
   pObj->pDnodeConn = pDnodeConn;

View File

@@ -255,6 +255,9 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
   // release the metric/meter meta information reference, so data in cache can be updated
   taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
+  tscFreeSqlResult(pSql);
+  tfree(pSql->pSubs);
+  pSql->numOfSubs = 0;
   tfree(pTableMetaInfo->vgroupList);
   tscSetNextLaunchTimer(pStream, pSql);
 }

View File

@@ -107,7 +107,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
   pSql->signature = pSql;
   pSql->param = pSql;
   pSql->pTscObj = pObj;
-  pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
+  pSql->maxRetry = TSDB_MAX_REPLICA;
   pSql->fp = asyncCallback;
 
   int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);

View File

@@ -152,8 +152,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
   tsBufFlush(output1);
   tsBufFlush(output2);
-  tsBufDestory(pSupporter1->pTSBuf);
-  tsBufDestory(pSupporter2->pTSBuf);
+  tsBufDestroy(pSupporter1->pTSBuf);
+  tsBufDestroy(pSupporter2->pTSBuf);
 
   tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks "
            "intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql, numOfInput1, numOfInput2, output1->numOfTotal,
@@ -550,7 +550,7 @@ static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1,
   return true;
 }
 
-static void getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) {
+static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) {
   tscDebug("%p all subqueries retrieve <tid, tags> complete, do tags match", pParentSql);
 
   SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
@@ -568,10 +568,7 @@ static void getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParent
   *s2 = taosArrayInit(p2->num, p2->tagSize);
 
   if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) {
-    freeJoinSubqueryObj(pParentSql);
-    pParentSql->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY;
-    tscQueueAsyncRes(pParentSql);
-    return;
+    return TSDB_CODE_QRY_DUP_JOIN_KEY;
   }
 
   int32_t i = 0, j = 0;
@@ -594,6 +591,8 @@ static void getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParent
       i++;
     }
   }
+
+  return TSDB_CODE_SUCCESS;
 }
 
 static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
@@ -680,7 +679,14 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
   }
 
   SArray *s1 = NULL, *s2 = NULL;
-  getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
+  int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
+  if (code != TSDB_CODE_SUCCESS) {
+    freeJoinSubqueryObj(pParentSql);
+    pParentSql->res.code = code;
+    tscQueueAsyncRes(pParentSql);
+    return;
+  }
+
   if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) {  // no results,return.
     tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql);
     freeJoinSubqueryObj(pParentSql);
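In the hunks above, getIntersectionOfTableTuple now only reports the duplicate-join-key condition through its return value; freeing the subqueries, setting res.code, and queuing the async result stay with the caller. A minimal sketch of that propagate-then-handle split, with made-up helper names:

#include <stdint.h>

#define OK       0
#define ERR_DUP -1

/* The helper only detects the condition and reports it. */
static int32_t intersect_tuples(int hasDuplicateKey) {
  if (hasDuplicateKey) return ERR_DUP;
  /* ... build the intersection ... */
  return OK;
}

/* The caller owns cleanup and the user-visible error path. */
static void on_retrieve(int hasDuplicateKey) {
  int32_t code = intersect_tuples(hasDuplicateKey);
  if (code != OK) {
    /* free sub-objects, record the code, notify the waiting query */
    return;
  }
  /* continue with the join */
}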
@@ -762,7 +768,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
     STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
     tsBufMerge(pSupporter->pTSBuf, pBuf, pTableMetaInfo->vgroupIndex);
-    tsBufDestory(pBuf);
+    tsBufDestroy(pBuf);
   }
 
   // continue to retrieve ts-comp data from vnode
@@ -1447,9 +1453,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
 static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
   tscDebug("%p start to free subquery result", pSql);
 
-  if (pSql->res.code == TSDB_CODE_SUCCESS) {
   taos_free_result(pSql);
-  }
 
   tfree(trsupport->localBuffer);
@@ -2128,7 +2132,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
   size_t size = tscNumOfFields(pQueryInfo);
   for (int i = 0; i < size; ++i) {
-    SFieldSupInfo* pSup = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, i);
+    SFieldSupInfo* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pSupportInfo, i);
     if (pSup->pSqlExpr != NULL) {
       tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
     }
@@ -2138,8 +2142,10 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
       continue;
     }
 
-    TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
+    TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pFields, i);
+    if (pRes->tsrow[i] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) {
       transferNcharData(pSql, i, pField);
+    }
 
     // calculate the result from several other columns
     if (pSup->pArithExprInfo != NULL) {

View File

@@ -794,7 +794,7 @@ SFieldSupInfo* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
 }
 
 SFieldSupInfo* tscFieldInfoGetSupp(SFieldInfo* pFieldInfo, int32_t index) {
-  return taosArrayGet(pFieldInfo->pSupportInfo, index);
+  return TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, index);
 }
 
 SFieldSupInfo* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) {
@@ -858,11 +858,9 @@ void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src) {
 }
 
 TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) {
-  return taosArrayGet(pFieldInfo->pFields, index);
+  return TARRAY_GET_ELEM(pFieldInfo->pFields, index);
 }
 
-int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
-
 int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
   SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, index);
   assert(pInfo != NULL);
@@ -1546,7 +1544,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
     pQueryInfo->groupbyExpr.columnInfo = NULL;
   }
 
-  pQueryInfo->tsBuf = tsBufDestory(pQueryInfo->tsBuf);
+  pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);
   tfree(pQueryInfo->fillVal);
 }
@@ -1651,7 +1649,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
   pNew->fp = fp;
   pNew->param = param;
-  pNew->maxRetry = TSDB_MAX_REPLICA_NUM;
+  pNew->maxRetry = TSDB_MAX_REPLICA;
 
   pNew->sqlstr = strdup(pSql->sqlstr);
   if (pNew->sqlstr == NULL) {
@@ -1806,7 +1804,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
   pNew->fp = fp;
   pNew->param = param;
-  pNew->maxRetry = TSDB_MAX_REPLICA_NUM;
+  pNew->maxRetry = TSDB_MAX_REPLICA;
 
   char* name = pTableMetaInfo->name;
   STableMetaInfo* pFinalInfo = NULL;
@@ -1822,7 +1820,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
     STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta);
 
     SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
-    pPrevInfo->vgroupList = NULL;
     pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
   }
 
@@ -2086,42 +2083,42 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
   }
 }
 
-void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
-  SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);
-  assert(pInfo->pSqlExpr != NULL);
-
-  int32_t type = pInfo->pSqlExpr->resType;
-  int32_t bytes = pInfo->pSqlExpr->resBytes;
-
-  char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
-
-  if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
-    int32_t realLen = varDataLen(pData);
-    assert(realLen <= bytes - VARSTR_HEADER_SIZE);
-
-    if (isNull(pData, type)) {
-      pRes->tsrow[columnIndex] = NULL;
-    } else {
-      pRes->tsrow[columnIndex] = ((tstr*)pData)->data;
-    }
-
-    if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) {  // todo refactor
-      *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
-    }
-
-    pRes->length[columnIndex] = realLen;
-  } else {
-    assert(bytes == tDataTypeDesc[type].nSize);
-
-    if (isNull(pData, type)) {
-      pRes->tsrow[columnIndex] = NULL;
-    } else {
-      pRes->tsrow[columnIndex] = pData;
-    }
-
-    pRes->length[columnIndex] = bytes;
-  }
-}
+//void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
+//  SFieldSupInfo* pInfo = TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex);
+//  assert(pInfo->pSqlExpr != NULL);
+//
+//  int32_t type = pInfo->pSqlExpr->resType;
+//  int32_t bytes = pInfo->pSqlExpr->resBytes;
+//
+//  char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
+//
+//  if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
+//    int32_t realLen = varDataLen(pData);
+//    assert(realLen <= bytes - VARSTR_HEADER_SIZE);
+//
+//    if (isNull(pData, type)) {
+//      pRes->tsrow[columnIndex] = NULL;
+//    } else {
+//      pRes->tsrow[columnIndex] = ((tstr*)pData)->data;
+//    }
+//
+//    if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) {  // todo refactor
+//      *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
+//    }
+//
+//    pRes->length[columnIndex] = realLen;
+//  } else {
+//    assert(bytes == tDataTypeDesc[type].nSize);
+//
+//    if (isNull(pData, type)) {
+//      pRes->tsrow[columnIndex] = NULL;
+//    } else {
+//      pRes->tsrow[columnIndex] = pData;
+//    }
+//
+//    pRes->length[columnIndex] = bytes;
+//  }
+//}
 
 void* malloc_throw(size_t size) {
   void* p = malloc(size);

View File

@@ -69,6 +69,7 @@ extern int64_t tsMaxRetentWindow;
 extern int32_t tsCacheBlockSize;
 extern int32_t tsBlocksPerVnode;
 extern int32_t tsMaxTablePerVnode;
+extern int32_t tsMaxVgroupsPerDb;
 extern int16_t tsDaysPerFile;
 extern int32_t tsDaysToKeep;
 extern int32_t tsMinRowsInFileBlock;

View File

@@ -110,14 +110,9 @@ int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME;  // seconds
 int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
 int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
 int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
-int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM;
-
-#ifdef _TD_ARM_32_
-int32_t tsMaxTablePerVnode = 100;
-#else
-int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
-#endif
+int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
+int32_t tsMaxVgroupsPerDb = 0;
+int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
 
 // balance
 int32_t tsEnableBalance = 1;
 int32_t tsAlternativeRole = 0;
@@ -560,7 +555,7 @@ static void doInitGlobalConfig() {
   cfg.ptr = &tsMinIntervalTime;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
-  cfg.minValue = 10;
+  cfg.minValue = 1;
   cfg.maxValue = 1000000;
   cfg.ptrLength = 0;
   cfg.unitType = TAOS_CFG_UTYPE_MS;
@@ -606,8 +601,18 @@ static void doInitGlobalConfig() {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);
 
+  cfg.option = "maxVgroupsPerDb";
+  cfg.ptr = &tsMaxVgroupsPerDb;
+  cfg.valType = TAOS_CFG_VTYPE_INT32;
+  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+  cfg.minValue = 0;
+  cfg.maxValue = 8192;
+  cfg.ptrLength = 0;
+  cfg.unitType = TAOS_CFG_UTYPE_NONE;
+  taosInitConfigOption(cfg);
+
   // database configs
-  cfg.option = "maxtablesPerVnode";
+  cfg.option = "maxTablesPerVnode";
   cfg.ptr = &tsMaxTablePerVnode;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
@@ -701,8 +706,8 @@ static void doInitGlobalConfig() {
   cfg.ptr = &tsReplications;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
-  cfg.minValue = TSDB_MIN_REPLICA_NUM;
-  cfg.maxValue = TSDB_MAX_REPLICA_NUM;
+  cfg.minValue = TSDB_MIN_DB_REPLICA_OPTION;
+  cfg.maxValue = TSDB_MAX_DB_REPLICA_OPTION;
   cfg.ptrLength = 0;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

View File

@@ -82,7 +82,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
   }
 
   int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
-  if (!(timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
+  if (!(timeUnit == 'u' || timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
     /*
      * here we revised the start time of day according to the local time zone,
      * but in case of DST, the start time of one day need to be dynamically decided.

View File

@@ -367,31 +367,31 @@ bool isValidDataType(int32_t type) {
   return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR;
 }
 
-bool isNull(const char *val, int32_t type) {
-  switch (type) {
-    case TSDB_DATA_TYPE_BOOL:
-      return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
-    case TSDB_DATA_TYPE_TINYINT:
-      return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
-    case TSDB_DATA_TYPE_SMALLINT:
-      return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
-    case TSDB_DATA_TYPE_INT:
-      return *(uint32_t *)val == TSDB_DATA_INT_NULL;
-    case TSDB_DATA_TYPE_BIGINT:
-    case TSDB_DATA_TYPE_TIMESTAMP:
-      return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
-    case TSDB_DATA_TYPE_FLOAT:
-      return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
-    case TSDB_DATA_TYPE_DOUBLE:
-      return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
-    case TSDB_DATA_TYPE_NCHAR:
-      return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
-    case TSDB_DATA_TYPE_BINARY:
-      return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
-    default:
-      return false;
-  };
-}
+//bool isNull(const char *val, int32_t type) {
+//  switch (type) {
+//    case TSDB_DATA_TYPE_BOOL:
+//      return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
+//    case TSDB_DATA_TYPE_TINYINT:
+//      return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
+//    case TSDB_DATA_TYPE_SMALLINT:
+//      return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
+//    case TSDB_DATA_TYPE_INT:
+//      return *(uint32_t *)val == TSDB_DATA_INT_NULL;
+//    case TSDB_DATA_TYPE_BIGINT:
+//    case TSDB_DATA_TYPE_TIMESTAMP:
+//      return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
+//    case TSDB_DATA_TYPE_FLOAT:
+//      return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
+//    case TSDB_DATA_TYPE_DOUBLE:
+//      return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
+//    case TSDB_DATA_TYPE_NCHAR:
+//      return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
+//    case TSDB_DATA_TYPE_BINARY:
+//      return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
+//    default:
+//      return false;
+//  };
+//}
 
 void setVardataNull(char* val, int32_t type) {
   if (type == TSDB_DATA_TYPE_BINARY) {

View File

@@ -52,7 +52,7 @@ public class TDNode {
   public void start() {
     String selfPath = System.getProperty("user.dir");
     String binPath = "";
-    String projDir = selfPath + "/../../../../";
+    String projDir = selfPath + "/../../../";
 
     try {
       ArrayList<String> taosdPath = new ArrayList<>();
@@ -68,7 +68,7 @@ public class TDNode {
         return;
       } else {
         for(String p : taosdPath) {
-          if(!p.contains("packing")) {
+          if(!p.contains("packaging")) {
             binPath = p;
           }
         }
@@ -77,7 +77,7 @@ public class TDNode {
       e.printStackTrace();
     }
 
-    if(binPath.equals("")) {
+    if(binPath.isEmpty()) {
       System.out.println("taosd not found");
       return;
     } else {
@@ -106,14 +106,10 @@ public class TDNode {
     String toBeKilled = "taosd";
 
     if (this.running != 0) {
-      String psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print " + toBeKilled + "}'";
+      String killCmd = "pkill -kill -x " + toBeKilled;
+      String[] killCmds = {"sh", "-c", killCmd};
       try {
-        Process ps = Runtime.getRuntime().exec(psCmd);
-        ps.waitFor();
-        long pid = ps.pid();
-        String killCmd = "kill -9 " + pid;
-        Runtime.getRuntime().exec(killCmd).waitFor();
+        Runtime.getRuntime().exec(killCmds).waitFor();
 
         for(int port = 6030; port < 6041; port ++) {
           String fuserCmd = "fuser -k -n tcp " + port;
@@ -124,7 +120,7 @@ public class TDNode {
       }
 
       this.running = 0;
-      System.out.println("dnode:" + this.index + "is stopped by kill -9");
+      System.out.println("dnode:" + this.index + " is stopped by pkill");
     }
   }
@@ -137,7 +133,6 @@ public class TDNode {
     }
   }
 
   public void stopIP() {
     try{
       String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " down";

View File

@@ -16,11 +16,9 @@ public class TDNodes {
   public void setPath(String path) {
     try {
-      String psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" ;
-      Process ps = Runtime.getRuntime().exec(psCmd);
-      ps.waitFor();
-      String killCmd = "kill -9 " + ps.pid();
-      Runtime.getRuntime().exec(killCmd).waitFor();
+      String killCmd = "pkill -kill -x taosd";
+      String[] killCmds = {"sh", "-c", killCmd};
+      Runtime.getRuntime().exec(killCmds).waitFor();
 
       String binPath = System.getProperty("user.dir");
       binPath += "/../../../debug";

View File

@@ -698,10 +698,12 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
   pStatus->alternativeRole = (uint8_t) tsAlternativeRole;
 
   // fill cluster cfg parameters
-  pStatus->clusterCfg.numOfMnodes        = tsNumOfMnodes;
-  pStatus->clusterCfg.mnodeEqualVnodeNum = tsMnodeEqualVnodeNum;
-  pStatus->clusterCfg.offlineThreshold   = tsOfflineThreshold;
-  pStatus->clusterCfg.statusInterval     = tsStatusInterval;
+  pStatus->clusterCfg.numOfMnodes        = htonl(tsNumOfMnodes);
+  pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum);
+  pStatus->clusterCfg.offlineThreshold   = htonl(tsOfflineThreshold);
+  pStatus->clusterCfg.statusInterval     = htonl(tsStatusInterval);
+  pStatus->clusterCfg.maxtablesPerVnode  = htonl(tsMaxTablePerVnode);
+  pStatus->clusterCfg.maxVgroupsPerDb    = htonl(tsMaxVgroupsPerDb);
   strcpy(pStatus->clusterCfg.arbitrator, tsArbitrator);
   strcpy(pStatus->clusterCfg.timezone, tsTimezone);
   strcpy(pStatus->clusterCfg.locale, tsLocale);
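The status message now converts the integer cluster-config fields to network byte order with htonl before they go on the wire, and the mnode-side check (see the mnodeCheckClusterCfgPara hunk further down) compares the received values against htonl of its own settings, so both ends agree on the encoding. A minimal sketch of that round trip, with a hypothetical field name:

#include <arpa/inet.h>
#include <stdint.h>

typedef struct { int32_t statusInterval; } wire_cfg_t;

/* sender: host order -> network order */
static void cfg_pack(wire_cfg_t *w, int32_t statusInterval) {
  w->statusInterval = (int32_t)htonl((uint32_t)statusInterval);
}

/* receiver: either convert back with ntohl, or compare against htonl(local),
 * which is the approach the dnode/mnode pair above takes. */
static int cfg_matches(const wire_cfg_t *w, int32_t localStatusInterval) {
  return w->statusInterval == (int32_t)htonl((uint32_t)localStatusInterval);
}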

View File

@@ -20,6 +20,7 @@
 #include "tglobal.h"
 #include "dnodeInt.h"
 #include "dnodeMain.h"
+#include "tfile.h"
 
 static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context);
 static sem_t exitSem;
@@ -67,6 +68,18 @@ int32_t main(int32_t argc, char *argv[]) {
       taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true);
     }
   }
+#endif
+#ifdef TAOS_RANDOM_FILE_FAIL
+    else if (strcmp(argv[i], "--random-file-fail-factor") == 0) {
+      if ( (i+1) < argc ) {
+        int factor = atoi(argv[i+1]);
+        printf("The factor of random failure is %d\n", factor);
+        taosSetRandomFileFailFactor(factor);
+      } else {
+        printf("Please specify a number for random failure factor!");
+        exit(EXIT_FAILURE);
+      }
+    }
 #endif
   }

View File

@ -20,7 +20,6 @@ extern "C" {
#endif #endif
typedef void* qinfo_t; typedef void* qinfo_t;
typedef void (*_qinfo_free_fn_t)(void*);
/** /**
* create the qinfo object according to QueryTableMsg * create the qinfo object according to QueryTableMsg
@ -29,13 +28,8 @@ typedef void (*_qinfo_free_fn_t)(void*);
* @param qinfo * @param qinfo
* @return * @return
*/ */
int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, void* param, _qinfo_free_fn_t fn, qinfo_t* qinfo); int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, void* param, qinfo_t* qinfo);
/**
* Destroy QInfo object
* @param qinfo qhandle
*/
void qDestroyQueryInfo(qinfo_t qinfo);
/** /**
* the main query execution function, including query on both table and multitables, * the main query execution function, including query on both table and multitables,
@ -84,8 +78,14 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
*/ */
int32_t qKillQuery(qinfo_t qinfo); int32_t qKillQuery(qinfo_t qinfo);
/**
* destroy query info structure
* @param qHandle
*/
void qDestroyQueryInfo(qinfo_t qHandle);
void* qOpenQueryMgmt(int32_t vgId); void* qOpenQueryMgmt(int32_t vgId);
void qSetQueryMgmtClosed(void* pExecutor); void qQueryMgmtNotifyClosed(void* pExecutor);
void qCleanupQueryMgmt(void* pExecutor); void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, uint64_t qInfo); void** qRegisterQInfo(void* pMgmt, uint64_t qInfo);
void** qAcquireQInfo(void* pMgmt, uint64_t key); void** qAcquireQInfo(void* pMgmt, uint64_t key);

View File

@@ -160,7 +160,32 @@ extern tDataTypeDescriptor tDataTypeDesc[11];
 #define POINTER_BYTES sizeof(void *)  // 8 by default  assert(sizeof(ptrdiff_t) == sizseof(void*)
 
 bool isValidDataType(int32_t type);
-bool isNull(const char *val, int32_t type);
+//bool isNull(const char *val, int32_t type);
+static inline __attribute__((always_inline)) bool isNull(const char *val, int32_t type) {
+  switch (type) {
+    case TSDB_DATA_TYPE_BOOL:
+      return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
+    case TSDB_DATA_TYPE_TINYINT:
+      return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
+    case TSDB_DATA_TYPE_SMALLINT:
+      return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
+    case TSDB_DATA_TYPE_INT:
+      return *(uint32_t *)val == TSDB_DATA_INT_NULL;
+    case TSDB_DATA_TYPE_BIGINT:
+    case TSDB_DATA_TYPE_TIMESTAMP:
+      return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
+    case TSDB_DATA_TYPE_FLOAT:
+      return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
+    case TSDB_DATA_TYPE_DOUBLE:
+      return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
+    case TSDB_DATA_TYPE_NCHAR:
+      return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
+    case TSDB_DATA_TYPE_BINARY:
+      return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
+    default:
+      return false;
+  };
+}
 
 void setVardataNull(char* val, int32_t type);
 void setNull(char *val, int32_t type, int32_t bytes);
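Moving isNull into the header as an always-inline function works because NULL is encoded as a reserved per-type bit pattern (the TSDB_DATA_*_NULL constants) rather than a separate validity bitmap, so a NULL test is a single fixed-width comparison. A small illustration of that sentinel idea, using made-up sentinel values rather than the real constants:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sentinels; the real values live in taosdef.h. */
#define EX_INT_NULL   ((uint32_t)0x80000000)
#define EX_SMALL_NULL ((uint16_t)0x8000)

static inline bool ex_is_null_int(const char *cell)   { return *(const uint32_t *)cell == EX_INT_NULL; }
static inline bool ex_is_null_small(const char *cell) { return *(const uint16_t *)cell == EX_SMALL_NULL; }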
@@ -271,8 +296,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_DEFAULT_TOTAL_BLOCKS 4
 
 #define TSDB_MIN_TABLES 4
-#define TSDB_MAX_TABLES 200000
-#define TSDB_DEFAULT_TABLES 1000
+#define TSDB_MAX_TABLES 5000000
+#define TSDB_DEFAULT_TABLES 200000
+#define TSDB_TABLES_STEP 10000
 
 #define TSDB_MIN_DAYS_PER_FILE 1
 #define TSDB_MAX_DAYS_PER_FILE 3650
@@ -306,9 +332,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_MAX_WAL_LEVEL 2
 #define TSDB_DEFAULT_WAL_LEVEL 1
 
-#define TSDB_MIN_REPLICA_NUM 1
-#define TSDB_MAX_REPLICA_NUM 3
-#define TSDB_DEFAULT_REPLICA_NUM 1
+#define TSDB_MIN_DB_REPLICA_OPTION 1
+#define TSDB_MAX_DB_REPLICA_OPTION 3
+#define TSDB_DEFAULT_DB_REPLICA_OPTION 1
 
 #define TSDB_MAX_JOIN_TABLE_NUM 5
 #define TSDB_MAX_UNION_CLAUSE 5

View File

@@ -200,6 +200,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "tsdb inval
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, 0, 0x0612, "tsdb create table information")
 
 // query
 TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle")

View File

@@ -569,6 +569,8 @@ typedef struct {
   char timezone[64];             // tsTimezone
   char locale[TSDB_LOCALE_LEN];  // tsLocale
   char charset[TSDB_LOCALE_LEN]; // tsCharset
+  int32_t maxtablesPerVnode;
+  int32_t maxVgroupsPerDb;
 } SClusterCfg;
 
 typedef struct {
@@ -644,7 +646,7 @@ typedef struct SCMSTableVgroupMsg {
 typedef struct {
   int32_t vgId;
   int8_t  numOfIps;
-  SIpAddr ipAddr[TSDB_MAX_REPLICA_NUM];
+  SIpAddr ipAddr[TSDB_MAX_REPLICA];
 } SCMVgroupInfo;
 
 typedef struct {

View File

@@ -136,7 +136,7 @@ static void shellGetDirectoryFileList(char *inputDir)
 static void shellSourceFile(TAOS *con, char *fptr) {
   wordexp_t full_path;
   int read_len = 0;
-  char * cmd = malloc(MAX_COMMAND_SIZE);
+  char * cmd = malloc(tsMaxSQLStringLen);
   size_t cmd_len = 0;
   char * line = NULL;
   size_t line_len = 0;
@@ -185,7 +185,7 @@ static void shellSourceFile(TAOS *con, char *fptr) {
   int lineNo = 0;
   while ((read_len = getline(&line, &line_len, f)) != -1) {
     ++lineNo;
-    if (read_len >= MAX_COMMAND_SIZE) continue;
+    if (read_len >= tsMaxSQLStringLen) continue;
     line[--read_len] = '\0';
 
     if (read_len == 0 || isCommentLine(line)) {  // line starts with #

View File

@@ -44,10 +44,7 @@ void mnodeRemoveSuperTableFromDb(SDbObj *pDb);
 void mnodeAddTableIntoDb(SDbObj *pDb);
 void mnodeRemoveTableFromDb(SDbObj *pDb);
 void mnodeAddVgroupIntoDb(SVgObj *pVgroup);
-void mnodeAddVgroupIntoDbTail(SVgObj *pVgroup);
 void mnodeRemoveVgroupFromDb(SVgObj *pVgroup);
-void mnodeMoveVgroupToTail(SVgObj *pVgroup);
-void mnodeMoveVgroupToHead(SVgObj *pVgroup);
 
 #ifdef __cplusplus
 }

View File

@@ -145,10 +145,8 @@ typedef struct SVgObj {
   int64_t totalStorage;
   int64_t compStorage;
   int64_t pointsWritten;
-  struct SVgObj *prev, *next;
   struct SDbObj *pDb;
   void * idPool;
-  SChildTableObj **tableList;
 } SVgObj;
 
 typedef struct {
@@ -183,9 +181,11 @@ typedef struct SDbObj {
   int32_t numOfVgroups;
   int32_t numOfTables;
   int32_t numOfSuperTables;
-  SVgObj *pHead;
-  SVgObj *pTail;
+  int32_t vgListSize;
+  int32_t vgListIndex;
+  SVgObj **vgList;
   struct SAcctObj *pAcct;
+  pthread_mutex_t mutex;
 } SDbObj;
 
 typedef struct SUserObj {
@@ -246,7 +246,8 @@ typedef struct {
   int16_t offset[TSDB_MAX_COLUMNS];
   int16_t bytes[TSDB_MAX_COLUMNS];
   int32_t numOfReads;
-  int8_t reserved0[2];
+  int8_t maxReplica;
+  int8_t reserved0[0];
   uint16_t payloadLen;
   char payload[];
 } SShowObj;

View File

@@ -40,6 +40,7 @@ char* mnodeGetDnodeStatusStr(int32_t dnodeStatus);
 void mgmtMonitorDnodeModule();
 
 int32_t mnodeGetDnodesNum();
+int32_t mnodeGetOnlinDnodesCpuCoreNum();
 int32_t mnodeGetOnlinDnodesNum();
 void * mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode);
 void mnodeIncDnodeRef(SDnodeObj *pDnode);

View File

@@ -94,6 +94,7 @@ void sdbDecRef(void *thandle, void *pRow);
 int64_t sdbGetNumOfRows(void *handle);
 int32_t sdbGetId(void *handle);
 uint64_t sdbGetVersion();
+bool sdbCheckRowDeleted(void *thandle, void *pRow);
 
 #ifdef __cplusplus
 }

View File

@@ -30,17 +30,17 @@ void mnodeDecVgroupRef(SVgObj *pVgroup);
 void mnodeDropAllDbVgroups(SDbObj *pDropDb);
 void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb);
 void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode);
-void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
+//void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
 
 void * mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup);
 void mnodeUpdateVgroup(SVgObj *pVgroup);
 void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload);
 void mnodeCheckUnCreatedVgroup(SDnodeObj *pDnode, SVnodeLoad *pVloads, int32_t openVnodes);
 
-int32_t mnodeCreateVgroup(struct SMnodeMsg *pMsg, SDbObj *pDb);
+int32_t mnodeCreateVgroup(struct SMnodeMsg *pMsg);
 void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle);
 void mnodeAlterVgroup(SVgObj *pVgroup, void *ahandle);
-SVgObj *mnodeGetAvailableVgroup(SDbObj *pDb);
+int32_t mnodeGetAvailableVgroup(struct SMnodeMsg *pMsg, SVgObj **pVgroup, int32_t *sid);
 
 void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
 void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);

View File

@@ -38,6 +38,7 @@
 #include "mnodeUser.h"
 #include "mnodeVgroup.h"
 
+#define VG_LIST_SIZE 8
 static void * tsDbSdb = NULL;
 static int32_t tsDbUpdateSize;
@@ -50,8 +51,14 @@ static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg);
 static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg);
 static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg);
 
+static void mnodeDestroyDb(SDbObj *pDb) {
+  pthread_mutex_destroy(&pDb->mutex);
+  tfree(pDb->vgList);
+  tfree(pDb);
+}
+
 static int32_t mnodeDbActionDestroy(SSdbOper *pOper) {
-  tfree(pOper->pObj);
+  mnodeDestroyDb(pOper->pObj);
   return TSDB_CODE_SUCCESS;
 }
@@ -59,8 +66,9 @@ static int32_t mnodeDbActionInsert(SSdbOper *pOper) {
   SDbObj *pDb = pOper->pObj;
   SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
 
-  pDb->pHead = NULL;
-  pDb->pTail = NULL;
+  pthread_mutex_init(&pDb->mutex, NULL);
+  pDb->vgListSize = VG_LIST_SIZE;
+  pDb->vgList = calloc(pDb->vgListSize, sizeof(SVgObj *));
   pDb->numOfVgroups = 0;
   pDb->numOfTables = 0;
   pDb->numOfSuperTables = 0;
@@ -94,14 +102,15 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
 }
 
 static int32_t mnodeDbActionUpdate(SSdbOper *pOper) {
-  SDbObj *pDb = pOper->pObj;
-  SDbObj *pSaved = mnodeGetDb(pDb->name);
-  if (pDb != pSaved) {
-    memcpy(pSaved, pDb, pOper->rowSize);
-    free(pDb);
+  SDbObj *pNew = pOper->pObj;
+  SDbObj *pDb = mnodeGetDb(pNew->name);
+  if (pDb != NULL && pNew != pDb) {
+    memcpy(pDb, pNew, pOper->rowSize);
+    free(pNew->vgList);
+    free(pNew);
   }
-  mnodeUpdateAllDbVgroups(pSaved);
-  mnodeDecDbRef(pSaved);
+  //mnodeUpdateAllDbVgroups(pDb);
+  mnodeDecDbRef(pDb);
   return TSDB_CODE_SUCCESS;
 }
@@ -180,8 +189,13 @@ void mnodeDecDbRef(SDbObj *pDb) {
 SDbObj *mnodeGetDbByTableId(char *tableId) {
   char db[TSDB_TABLE_ID_LEN], *pos;
 
+  // tableId format should be : acct.db.table
   pos = strstr(tableId, TS_PATH_DELIMITER);
+  assert(NULL != pos);
+
   pos = strstr(pos + 1, TS_PATH_DELIMITER);
+  assert(NULL != pos);
+
   memset(db, 0, sizeof(db));
   strncpy(db, tableId, pos - tableId);
@@ -273,9 +287,9 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
     return TSDB_CODE_MND_INVALID_DB_OPTION;
   }
 
-  if (pCfg->replications < TSDB_MIN_REPLICA_NUM || pCfg->replications > TSDB_MAX_REPLICA_NUM) {
-    mError("invalid db option replications:%d valid range: [%d, %d]", pCfg->replications, TSDB_MIN_REPLICA_NUM,
-           TSDB_MAX_REPLICA_NUM);
+  if (pCfg->replications < TSDB_MIN_DB_REPLICA_OPTION || pCfg->replications > TSDB_MAX_DB_REPLICA_OPTION) {
+    mError("invalid db option replications:%d valid range: [%d, %d]", pCfg->replications, TSDB_MIN_DB_REPLICA_OPTION,
+           TSDB_MAX_DB_REPLICA_OPTION);
     return TSDB_CODE_MND_INVALID_DB_OPTION;
   }
@@ -379,7 +393,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
   code = sdbInsertRow(&oper);
   if (code != TSDB_CODE_SUCCESS) {
-    tfree(pDb);
+    mnodeDestroyDb(pDb);
     mLInfo("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code));
     return code;
   } else {
@@ -416,45 +430,33 @@ void mnodePrintVgroups(SDbObj *pDb, char *oper) {
 void mnodeAddVgroupIntoDb(SVgObj *pVgroup) {
   SDbObj *pDb = pVgroup->pDb;
-  pVgroup->next = pDb->pHead;
-  pVgroup->prev = NULL;
-
-  if (pDb->pHead) pDb->pHead->prev = pVgroup;
-  if (pDb->pTail == NULL) pDb->pTail = pVgroup;
-
-  pDb->pHead = pVgroup;
-  pDb->numOfVgroups++;
-}
-
-void mnodeAddVgroupIntoDbTail(SVgObj *pVgroup) {
-  SDbObj *pDb = pVgroup->pDb;
-  pVgroup->next = NULL;
-  pVgroup->prev = pDb->pTail;
-
-  if (pDb->pTail) pDb->pTail->next = pVgroup;
-  if (pDb->pHead == NULL) pDb->pHead = pVgroup;
-
-  pDb->pTail = pVgroup;
-  pDb->numOfVgroups++;
+
+  pthread_mutex_lock(&pDb->mutex);
+  int32_t vgPos = pDb->numOfVgroups++;
+  if (vgPos >= pDb->vgListSize) {
+    pDb->vgList = realloc(pDb->vgList, pDb->vgListSize * 2 * sizeof(SVgObj *));
+    memset(pDb->vgList + pDb->vgListSize, 0, pDb->vgListSize * sizeof(SVgObj *));
+    pDb->vgListSize *= 2;
+  }
+
+  pDb->vgList[vgPos] = pVgroup;
+  pthread_mutex_unlock(&pDb->mutex);
 }
 
 void mnodeRemoveVgroupFromDb(SVgObj *pVgroup) {
   SDbObj *pDb = pVgroup->pDb;
-  if (pVgroup->prev) pVgroup->prev->next = pVgroup->next;
-  if (pVgroup->next) pVgroup->next->prev = pVgroup->prev;
-  if (pVgroup->prev == NULL) pDb->pHead = pVgroup->next;
-  if (pVgroup->next == NULL) pDb->pTail = pVgroup->prev;
-
-  pDb->numOfVgroups--;
-}
-
-void mnodeMoveVgroupToTail(SVgObj *pVgroup) {
-  mnodeRemoveVgroupFromDb(pVgroup);
-  mnodeAddVgroupIntoDbTail(pVgroup);
-}
-
-void mnodeMoveVgroupToHead(SVgObj *pVgroup) {
-  mnodeRemoveVgroupFromDb(pVgroup);
-  mnodeAddVgroupIntoDb(pVgroup);
+
+  pthread_mutex_lock(&pDb->mutex);
+  for (int32_t v1 = 0; v1 < pDb->numOfVgroups; ++v1) {
+    if (pDb->vgList[v1] == pVgroup) {
+      for (int32_t v2 = v1; v2 < pDb->numOfVgroups - 1; ++v2) {
+        pDb->vgList[v2] = pDb->vgList[v2 + 1];
+      }
+      pDb->numOfVgroups--;
+      break;
+    }
+  }
+  pthread_mutex_unlock(&pDb->mutex);
 }
 
 void mnodeCleanupDbs() {
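The per-database vgroup bookkeeping above moves from an intrusive prev/next list embedded in SVgObj to a mutex-protected pointer array that doubles when it fills. A tiny sketch of that grow-by-doubling append, with generic names rather than the SDbObj fields (and, like the hunk, without checking the realloc result):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  void          **items;
  int32_t         size;  /* used slots */
  int32_t         cap;   /* allocated slots */
  pthread_mutex_t lock;
} vec_t;

/* Append one pointer, doubling the backing array when it is full. */
static void vec_push(vec_t *v, void *p) {
  pthread_mutex_lock(&v->lock);
  if (v->size >= v->cap) {
    v->items = realloc(v->items, (size_t)v->cap * 2 * sizeof(void *));
    memset(v->items + v->cap, 0, (size_t)v->cap * sizeof(void *));
    v->cap *= 2;
  }
  v->items[v->size++] = p;
  pthread_mutex_unlock(&v->lock);
}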
@@ -526,11 +528,6 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
 #ifndef __CLOUD_VERSION__
   if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) {
 #endif
-    pShow->bytes[cols] = 4;
-    pSchema[cols].type = TSDB_DATA_TYPE_INT;
-    strcpy(pSchema[cols].name, "maxtables");
-    pSchema[cols].bytes = htons(pShow->bytes[cols]);
-    cols++;
 
     pShow->bytes[cols] = 4;
     pSchema[cols].type = TSDB_DATA_TYPE_INT;
@@ -556,12 +553,6 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
     pSchema[cols].bytes = htons(pShow->bytes[cols]);
     cols++;
 
-    pShow->bytes[cols] = 4;
-    pSchema[cols].type = TSDB_DATA_TYPE_INT;
-    strcpy(pSchema[cols].name, "ctime(Sec.)");
-    pSchema[cols].bytes = htons(pShow->bytes[cols]);
-    cols++;
-
     pShow->bytes[cols] = 1;
     pSchema[cols].type = TSDB_DATA_TYPE_TINYINT;
     strcpy(pSchema[cols].name, "wallevel");
@@ -671,10 +662,6 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
 #ifndef __CLOUD_VERSION__
     if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) {
 #endif
-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      *(int32_t *)pWrite = pDb->cfg.maxTables;  // table num can be created should minus 1
-      cols++;
-
       pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
       *(int32_t *)pWrite = pDb->cfg.cacheBlockSize;
       cols++;
@@ -691,10 +678,6 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
       *(int32_t *)pWrite = pDb->cfg.maxRowsPerFileBlock;
       cols++;
 
-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      *(int32_t *)pWrite = pDb->cfg.commitTime;
-      cols++;
-
       pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
       *(int8_t *)pWrite = pDb->cfg.walLevel;
       cols++;

View File

@@ -69,6 +69,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
   SDnodeObj *pDnode = pOper->pObj;
   if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
     pDnode->status = TAOS_DN_STATUS_OFFLINE;
+    pDnode->lastAccess = tsAccessSquence;
   }
 
   return TSDB_CODE_SUCCESS;
@@ -187,7 +188,27 @@ int32_t mnodeGetDnodesNum() {
   return sdbGetNumOfRows(tsDnodeSdb);
 }
 
-int32_t mnodeGetOnlinDnodesNum(char *ep) {
+int32_t mnodeGetOnlinDnodesCpuCoreNum() {
+  SDnodeObj *pDnode = NULL;
+  void *     pIter = NULL;
+  int32_t    cpuCores = 0;
+
+  while (1) {
+    pIter = mnodeGetNextDnode(pIter, &pDnode);
+    if (pDnode == NULL) break;
+    if (pDnode->status != TAOS_DN_STATUS_OFFLINE) {
+      cpuCores += pDnode->numOfCores;
+    }
+    mnodeDecDnodeRef(pDnode);
+  }
+
+  sdbFreeIter(pIter);
+
+  if (cpuCores < 2) cpuCores = 2;
+  return cpuCores;
+}
+
+int32_t mnodeGetOnlinDnodesNum() {
   SDnodeObj *pDnode = NULL;
   void *     pIter = NULL;
   int32_t    onlineDnodes = 0;
@@ -283,10 +304,12 @@ static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
 }
 
 static bool mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
-  if (clusterCfg->numOfMnodes != tsNumOfMnodes) return false;
-  if (clusterCfg->mnodeEqualVnodeNum != tsMnodeEqualVnodeNum) return false;
-  if (clusterCfg->offlineThreshold != tsOfflineThreshold) return false;
-  if (clusterCfg->statusInterval != tsStatusInterval) return false;
+  if (clusterCfg->numOfMnodes != htonl(tsNumOfMnodes)) return false;
+  if (clusterCfg->mnodeEqualVnodeNum != htonl(tsMnodeEqualVnodeNum)) return false;
+  if (clusterCfg->offlineThreshold != htonl(tsOfflineThreshold)) return false;
+  if (clusterCfg->statusInterval != htonl(tsStatusInterval)) return false;
+  if (clusterCfg->maxtablesPerVnode != htonl(tsMaxTablePerVnode)) return false;
+  if (clusterCfg->maxVgroupsPerDb != htonl(tsMaxVgroupsPerDb)) return false;
 
   if (0 != strncasecmp(clusterCfg->arbitrator, tsArbitrator, strlen(tsArbitrator))) return false;
   if (0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) return false;
@@ -376,7 +399,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
     if (false == ret) {
       mnodeDecDnodeRef(pDnode);
       rpcFreeCont(pRsp);
-      mError("dnode %s cluster cfg parameters inconsistent", pStatus->dnodeEp);
+      mError("dnode:%d, %s cluster cfg parameters inconsistent", pDnode->dnodeId, pStatus->dnodeEp);
       return TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT;
     }
@@ -468,18 +491,22 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
     return TSDB_CODE_MND_DNODE_NOT_EXIST;
   }
 
-  mnodeDecDnodeRef(pDnode);
   if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) {
     mError("dnode:%d, can't drop dnode:%s which is master", pDnode->dnodeId, ep);
+    mnodeDecDnodeRef(pDnode);
     return TSDB_CODE_MND_NO_REMOVE_MASTER;
   }
 
   mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
 #ifndef _SYNC
-  return mnodeDropDnode(pDnode, pMsg);
+  int32_t code = mnodeDropDnode(pDnode, pMsg);
 #else
-  return balanceDropDnode(pDnode);
+  int32_t code = balanceDropDnode(pDnode);
 #endif
+
+  mnodeDecDnodeRef(pDnode);
+  return code;
 }
 
 static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg) {

View File

@ -264,8 +264,12 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
if (pOper->cb != NULL) { if (pOper->cb != NULL) {
pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode); pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode);
} }
dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode); dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode);
// if ahandle, means this func is called by sdb write
if (ahandle == NULL) {
sdbDecRef(pOper->table, pOper->pObj);
}
taosFreeQitem(pOper); taosFreeQitem(pOper);
} }
@ -389,7 +393,7 @@ void sdbCleanUp() {
} }
void sdbIncRef(void *handle, void *pObj) { void sdbIncRef(void *handle, void *pObj) {
if (pObj == NULL) return; if (pObj == NULL || handle == NULL) return;
SSdbTable *pTable = handle; SSdbTable *pTable = handle;
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
@ -398,7 +402,7 @@ void sdbIncRef(void *handle, void *pObj) {
} }
void sdbDecRef(void *handle, void *pObj) { void sdbDecRef(void *handle, void *pObj) {
if (pObj == NULL) return; if (pObj == NULL || handle == NULL) return;
SSdbTable *pTable = handle; SSdbTable *pTable = handle;
int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos);
@ -581,16 +585,26 @@ static int sdbWrite(void *param, void *data, int type) {
return sdbInsertHash(pTable, &oper); return sdbInsertHash(pTable, &oper);
} else if (action == SDB_ACTION_DELETE) { } else if (action == SDB_ACTION_DELETE) {
SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont); SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont);
assert(rowMeta != NULL && rowMeta->row != NULL); if (rowMeta == NULL || rowMeta->row == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName,
pHead->cont);
return TSDB_CODE_SUCCESS;
}
SSdbOper oper = {.table = pTable, .pObj = rowMeta->row}; SSdbOper oper = {.table = pTable, .pObj = rowMeta->row};
return sdbDeleteHash(pTable, &oper); return sdbDeleteHash(pTable, &oper);
} else if (action == SDB_ACTION_UPDATE) { } else if (action == SDB_ACTION_UPDATE) {
SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont); SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont);
assert(rowMeta != NULL && rowMeta->row != NULL); if (rowMeta == NULL || rowMeta->row == NULL) {
sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName,
pHead->cont);
return TSDB_CODE_SUCCESS;
}
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
code = (*pTable->decodeFp)(&oper); code = (*pTable->decodeFp)(&oper);
return sdbUpdateHash(pTable, &oper); return sdbUpdateHash(pTable, &oper);
} else { return TSDB_CODE_MND_INVALID_MSG_TYPE; } } else {
return TSDB_CODE_MND_INVALID_MSG_TYPE;
}
} }
int32_t sdbInsertRow(SSdbOper *pOper) { int32_t sdbInsertRow(SSdbOper *pOper) {
@ -647,6 +661,14 @@ int32_t sdbInsertRow(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
bool sdbCheckRowDeleted(void *pTableInput, void *pRow) {
SSdbTable *pTable = pTableInput;
if (pTable == NULL) return false;
int8_t *updateEnd = pRow + pTable->refCountPos - 1;
return (*updateEnd == 1);
}
int32_t sdbDeleteRow(SSdbOper *pOper) { int32_t sdbDeleteRow(SSdbOper *pOper) {
SSdbTable *pTable = (SSdbTable *)pOper->table; SSdbTable *pTable = (SSdbTable *)pOper->table;
if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE; if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE;
@ -663,14 +685,18 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
return TSDB_CODE_MND_SDB_INVAID_META_ROW; return TSDB_CODE_MND_SDB_INVAID_META_ROW;
} }
sdbIncRef(pTable, pOper->pObj);
int32_t code = sdbDeleteHash(pTable, pOper); int32_t code = sdbDeleteHash(pTable, pOper);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
sdbError("table:%s, failed to delete from hash", pTable->tableName); sdbError("table:%s, failed to delete from hash", pTable->tableName);
sdbDecRef(pTable, pOper->pObj);
return code; return code;
} }
// just delete data from memory // just delete data from memory
if (pOper->type != SDB_OPER_GLOBAL) { if (pOper->type != SDB_OPER_GLOBAL) {
sdbDecRef(pTable, pOper->pObj);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -692,7 +718,6 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
} }
sdbIncRef(pNewOper->table, pNewOper->pObj);
taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper); taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
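A small self-contained sketch of how the sdbCheckRowDeleted lookup added above works, assuming a row layout where the byte just before the reference counter marks a logically deleted row; the struct and field names below are hypothetical, not the sdb's real definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  char    tableId[32];
  int8_t  updateEnd[4];  /* last byte is set to 1 once the row is logically deleted */
  int32_t refCount;      /* immediately follows the flag, as in the sdb row objects */
} DemoRow;

static bool rowDeleted(void *pRow, size_t refCountPos) {
  int8_t *flag = (int8_t *)pRow + refCountPos - 1;    /* same offset math as sdbCheckRowDeleted */
  return *flag == 1;
}

int main(void) {
  DemoRow row = { .tableId = "d1.t1", .updateEnd = {0}, .refCount = 1 };
  size_t refCountPos = offsetof(DemoRow, refCount);
  printf("deleted=%d\n", rowDeleted(&row, refCountPos));
  row.updateEnd[3] = 1;                               /* mark the row as deleted */
  printf("deleted=%d\n", rowDeleted(&row, refCountPos));
  return 0;
}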
@ -968,11 +993,12 @@ static void *sdbWorkerFp(void *param) {
int32_t code = sdbWrite(pOper, pHead, type); int32_t code = sdbWrite(pOper, pHead, type);
if (code > 0) code = 0; if (code > 0) code = 0;
if (pOper) if (pOper) {
pOper->retCode = code; pOper->retCode = code;
else } else {
pHead->len = code; // hackway pHead->len = code; // hackway
} }
}
walFsync(tsSdbObj.wal); walFsync(tsSdbObj.wal);
@ -983,7 +1009,6 @@ static void *sdbWorkerFp(void *param) {
if (type == TAOS_QTYPE_RPC) { if (type == TAOS_QTYPE_RPC) {
pOper = (SSdbOper *)item; pOper = (SSdbOper *)item;
sdbDecRef(pOper->table, pOper->pObj);
sdbConfirmForward(NULL, pOper, pOper->retCode); sdbConfirmForward(NULL, pOper, pOper->retCode);
} else if (type == TAOS_QTYPE_FWD) { } else if (type == TAOS_QTYPE_FWD) {
pHead = (SWalHead *)item; pHead = (SWalHead *)item;

View File

@ -72,7 +72,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessDropTableMsg(SMnodeMsg *mnodeMsg);
static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg);
static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg); static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn);
static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg); static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *mnodeMsg);
@ -319,15 +319,6 @@ static int32_t mnodeChildTableActionRestored() {
continue; continue;
} }
if (pVgroup->tableList == NULL) {
mError("ctable:%s, vgId:%d tableList is null", pTable->info.tableId, pTable->vgId);
pTable->vgId = 0;
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mnodeDecTableRef(pTable);
continue;
}
if (pTable->info.type == TSDB_CHILD_TABLE) { if (pTable->info.type == TSDB_CHILD_TABLE) {
SSuperTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid); SSuperTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid);
if (pSuperTable == NULL) { if (pSuperTable == NULL) {
@ -385,7 +376,7 @@ static void mnodeCleanupChildTables() {
} }
static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) {
pStable->numOfTables++; atomic_add_fetch_32(&pStable->numOfTables, 1);
if (pStable->vgHash == NULL) { if (pStable->vgHash == NULL) {
pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
@ -394,18 +385,22 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt
if (pStable->vgHash != NULL) { if (pStable->vgHash != NULL) {
if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) { if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) {
taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId)); taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
mDebug("table:%s, vgId:%d is put into stable vgList, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId,
(int32_t)taosHashGetSize(pStable->vgHash));
} }
} }
} }
static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { static void mnodeRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable) {
pStable->numOfTables--; atomic_sub_fetch_32(&pStable->numOfTables, 1);
if (pStable->vgHash == NULL) return; if (pStable->vgHash == NULL) return;
SVgObj *pVgroup = mnodeGetVgroup(pCtable->vgId); SVgObj *pVgroup = mnodeGetVgroup(pCtable->vgId);
if (pVgroup == NULL) { if (pVgroup == NULL) {
taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId)); taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId));
mDebug("table:%s, vgId:%d is remove from stable vgList, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId,
(int32_t)taosHashGetSize(pStable->vgHash));
} }
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
} }
@ -757,11 +752,15 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
} }
if (pMsg->pTable->type == TSDB_SUPER_TABLE) { if (pMsg->pTable->type == TSDB_SUPER_TABLE) {
mInfo("app:%p:%p, table:%s, start to drop stable", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); SSuperTableObj *pSTable = (SSuperTableObj *)pMsg->pTable;
mInfo("app:%p:%p, table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d",
pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId, pSTable->uid, pSTable->numOfTables, (int32_t)taosHashGetSize(pSTable->vgHash));
return mnodeProcessDropSuperTableMsg(pMsg); return mnodeProcessDropSuperTableMsg(pMsg);
} else { } else {
mInfo("app:%p:%p, table:%s, start to drop ctable", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); SChildTableObj *pCTable = (SChildTableObj *)pMsg->pTable;
return mnodeProcessDropChildTableMsg(pMsg); mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pCTable->vgId, pCTable->sid, pCTable->uid);
return mnodeProcessDropChildTableMsg(pMsg, true);
} }
} }
@ -808,7 +807,7 @@ static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
assert(pTable); assert(pTable);
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
mLInfo("stable:%s, is created in sdb", pTable->info.tableId); mLInfo("stable:%s, is created in sdb, uid:%" PRIu64, pTable->info.tableId, pTable->uid);
} else { } else {
mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
tstrerror(code)); tstrerror(code));
@ -896,7 +895,7 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable; SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
if (pStable->numOfTables != 0) { if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) {
SHashMutableIterator *pIter = taosHashCreateIter(pStable->vgHash); SHashMutableIterator *pIter = taosHashCreateIter(pStable->vgHash);
while (taosHashIterNext(pIter)) { while (taosHashIterNext(pIter)) {
int32_t *pVgId = taosHashIterGet(pIter); int32_t *pVgId = taosHashIterGet(pIter);
@ -1600,8 +1599,8 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
assert(pTable); assert(pTable);
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, mDebug("app:%p:%p, table:%s, created in mnode, vgId:%d sid:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle,
pTable->sid, pTable->uid); pMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid, tstrerror(code));
} else { } else {
mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg, mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
@ -1730,19 +1729,15 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
return code; return code;
} }
SVgObj *pVgroup = mnodeGetAvailableVgroup(pMsg->pDb);
if (pVgroup == NULL) {
mDebug("app:%p:%p, table:%s, start to create a new vgroup", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId);
return mnodeCreateVgroup(pMsg, pMsg->pDb);
}
if (pMsg->retry == 0) { if (pMsg->retry == 0) {
if (pMsg->pTable == NULL) { if (pMsg->pTable == NULL) {
int32_t sid = taosAllocateId(pVgroup->idPool); SVgObj *pVgroup = NULL;
if (sid <= 0) { int32_t sid = 0;
mDebug("app:%p:%p, table:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &sid);
pVgroup->vgId); if (code != TSDB_CODE_SUCCESS) {
return mnodeCreateVgroup(pMsg, pMsg->pDb); mDebug("app:%p:%p, table:%s, failed to get available vgroup, reason:%s", pMsg->rpcMsg.ahandle, pMsg,
pCreate->tableId, tstrerror(code));
return code;
} }
if (pMsg->pVgroup == NULL) { if (pMsg->pVgroup == NULL) {
@ -1750,7 +1745,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
mnodeIncVgroupRef(pVgroup); mnodeIncVgroupRef(pVgroup);
} }
mDebug("app:%p:%p, table:%s, create table in vgroup, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, mDebug("app:%p:%p, table:%s, allocated in vgroup, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId,
pVgroup->vgId, sid); pVgroup->vgId, sid);
return mnodeDoCreateChildTable(pMsg, sid); return mnodeDoCreateChildTable(pMsg, sid);
@ -1769,7 +1764,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
} }
} }
static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId); if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
if (pMsg->pVgroup == NULL) { if (pMsg->pVgroup == NULL) {
@ -1793,7 +1788,9 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup); SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
mInfo("app:%p:%p, table:%s, send drop ctable msg", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); mInfo("app:%p:%p, table:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pTable->vgId, pTable->sid, pTable->uid);
SRpcMsg rpcMsg = { SRpcMsg rpcMsg = {
.ahandle = pMsg, .ahandle = pMsg,
.pCont = pDrop, .pCont = pDrop,
@ -1802,6 +1799,8 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
.msgType = TSDB_MSG_TYPE_MD_DROP_TABLE .msgType = TSDB_MSG_TYPE_MD_DROP_TABLE
}; };
if (!needReturn) rpcMsg.ahandle = NULL;
dnodeSendMsgToDnode(&ipSet, &rpcMsg); dnodeSendMsgToDnode(&ipSet, &rpcMsg);
return TSDB_CODE_MND_ACTION_IN_PROGRESS; return TSDB_CODE_MND_ACTION_IN_PROGRESS;
@ -2125,7 +2124,7 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) {
int32_t numOfTables = 0; int32_t numOfTables = 0;
SChildTableObj *pTable = NULL; SChildTableObj *pTable = NULL;
mInfo("stable:%s, all child tables(%d) will dropped from sdb", pStable->info.tableId, numOfTables); mInfo("stable:%s, all child tables:%d will dropped from sdb", pStable->info.tableId, pStable->numOfTables);
while (1) { while (1) {
pIter = mnodeGetNextChildTable(pIter, &pTable); pIter = mnodeGetNextChildTable(pIter, &pTable);
@ -2149,6 +2148,7 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) {
mInfo("stable:%s, all child tables:%d is dropped from sdb", pStable->info.tableId, numOfTables); mInfo("stable:%s, all child tables:%d is dropped from sdb", pStable->info.tableId, numOfTables);
} }
#if 0
static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) { static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
SVgObj *pVgroup = mnodeGetVgroup(vnode); SVgObj *pVgroup = mnodeGetVgroup(vnode);
if (pVgroup == NULL) return NULL; if (pVgroup == NULL) return NULL;
@ -2159,8 +2159,11 @@ static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
return pTable; return pTable;
} }
#endif
static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
#if 0
SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont; SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont;
pCfg->dnodeId = htonl(pCfg->dnodeId); pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->vgId = htonl(pCfg->vgId); pCfg->vgId = htonl(pCfg->vgId);
@ -2184,6 +2187,7 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
pMsg->rpcRsp.rsp = pCreate; pMsg->rpcRsp.rsp = pCreate;
pMsg->rpcRsp.len = htonl(pCreate->contLen); pMsg->rpcRsp.len = htonl(pCreate->contLen);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
#endif
} }
// handle drop child response // handle drop child response
@ -2195,12 +2199,15 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
assert(pTable); assert(pTable);
mInfo("app:%p:%p, table:%s, drop table rsp received, thandle:%p result:%s", mnodeMsg->rpcMsg.ahandle, mnodeMsg,
pTable->info.tableId, mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code)); mInfo("app:%p:%p, table:%s, drop table rsp received, vgId:%d sid:%d uid:%" PRIu64 ", thandle:%p result:%s",
mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid,
mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code));
if (rpcMsg->code != TSDB_CODE_SUCCESS) { if (rpcMsg->code != TSDB_CODE_SUCCESS) {
mError("app:%p:%p, table:%s, failed to drop in dnode, reason:%s", mnodeMsg->rpcMsg.ahandle, mnodeMsg, mError("app:%p:%p, table:%s, failed to drop in dnode, vgId:%d sid:%d uid:%" PRIu64 ", reason:%s",
pTable->info.tableId, tstrerror(rpcMsg->code)); mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid,
tstrerror(rpcMsg->code));
dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code);
return; return;
} }
@ -2247,6 +2254,14 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable; SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
assert(pTable); assert(pTable);
// If the table is deleted by another thread during creation, stop creating and send drop msg to vnode
if (sdbCheckRowDeleted(tsChildTableSdb, pTable)) {
mDebug("app:%p:%p, table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64,
mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid);
mnodeProcessDropChildTableMsg(mnodeMsg, false);
rpcMsg->code = TSDB_CODE_SUCCESS;
}
if (rpcMsg->code == TSDB_CODE_SUCCESS || rpcMsg->code == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { if (rpcMsg->code == TSDB_CODE_SUCCESS || rpcMsg->code == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
SCMCreateTableMsg *pCreate = mnodeMsg->rpcMsg.pCont; SCMCreateTableMsg *pCreate = mnodeMsg->rpcMsg.pCont;
if (pCreate->getMeta) { if (pCreate->getMeta) {
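To make the needReturn/ahandle convention used above concrete, here is a tiny sketch under assumed types: clearing the ahandle on the outgoing RPC message is what turns the drop into a fire-and-forget request, so the response handler has no client context to answer. The names below are placeholders, not the mnode's real API.

#include <stdbool.h>
#include <stdio.h>

typedef struct { void *ahandle; const char *msg; } FakeRpcMsg;

static void sendDrop(bool needReturn, void *clientCtx) {
  FakeRpcMsg rpc = { .ahandle = clientCtx, .msg = "drop table" };
  if (!needReturn) rpc.ahandle = NULL;   /* nobody waits for this reply             */
  printf("send '%s', reply-to=%p\n", rpc.msg, rpc.ahandle);
}

int main(void) {
  int ctx = 0;
  sendDrop(true,  &ctx);   /* normal drop issued on behalf of a client              */
  sendDrop(false, &ctx);   /* internal drop after a create/drop race, no reply sent */
  return 0;
}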

View File

@ -46,6 +46,7 @@ typedef enum {
static void *tsVgroupSdb = NULL; static void *tsVgroupSdb = NULL;
static int32_t tsVgUpdateSize = 0; static int32_t tsVgUpdateSize = 0;
static int32_t mnodeAllocVgroupIdPool(SVgObj *pInputVgroup);
static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn); static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg); static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
@ -53,17 +54,17 @@ static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg);
static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ; static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ;
static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle); static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) { static void mnodeDestroyVgroup(SVgObj *pVgroup) {
SVgObj *pVgroup = pOper->pObj;
if (pVgroup->idPool) { if (pVgroup->idPool) {
taosIdPoolCleanUp(pVgroup->idPool); taosIdPoolCleanUp(pVgroup->idPool);
pVgroup->idPool = NULL; pVgroup->idPool = NULL;
} }
if (pVgroup->tableList) {
tfree(pVgroup->tableList);
}
tfree(pOper->pObj); tfree(pVgroup);
}
static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) {
mnodeDestroyVgroup(pOper->pObj);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -82,21 +83,9 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) {
} }
pVgroup->pDb = pDb; pVgroup->pDb = pDb;
pVgroup->prev = NULL;
pVgroup->next = NULL;
pVgroup->accessState = TSDB_VN_ALL_ACCCESS; pVgroup->accessState = TSDB_VN_ALL_ACCCESS;
if (mnodeAllocVgroupIdPool(pVgroup) < 0) {
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables; mError("vgId:%d, failed to init idpool for vgroups", pVgroup->vgId);
pVgroup->tableList = calloc(pDb->cfg.maxTables, sizeof(SChildTableObj *));
if (pVgroup->tableList == NULL) {
mError("vgId:%d, failed to malloc(size:%d) for the tableList of vgroups", pVgroup->vgId, size);
return -1;
}
pVgroup->idPool = taosInitIdPool(pDb->cfg.maxTables);
if (pVgroup->idPool == NULL) {
mError("vgId:%d, failed to taosInitIdPool for vgroups", pVgroup->vgId);
tfree(pVgroup->tableList);
return -1; return -1;
} }
@ -134,20 +123,6 @@ static int32_t mnodeVgroupActionDelete(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static void mnodeVgroupUpdateIdPool(SVgObj *pVgroup) {
int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
SDbObj *pDb = pVgroup->pDb;
if (pDb != NULL) {
if (pDb->cfg.maxTables != oldTables) {
mInfo("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
memset(pVgroup->tableList + oldTables, 0, (pDb->cfg.maxTables - oldTables) * sizeof(SChildTableObj *));
}
}
}
static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
SVgObj *pNew = pOper->pObj; SVgObj *pNew = pOper->pObj;
SVgObj *pVgroup = mnodeGetVgroup(pNew->vgId); SVgObj *pVgroup = mnodeGetVgroup(pNew->vgId);
@ -174,7 +149,6 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
free(pNew); free(pNew);
} }
mnodeVgroupUpdateIdPool(pVgroup);
// reset vgid status on vgroup changed // reset vgid status on vgroup changed
mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId); mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId);
@ -344,8 +318,132 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
} }
} }
SVgObj *mnodeGetAvailableVgroup(SDbObj *pDb) { static int32_t mnodeAllocVgroupIdPool(SVgObj *pInputVgroup) {
return pDb->pHead; SDbObj *pDb = pInputVgroup->pDb;
if (pDb == NULL) return TSDB_CODE_MND_APP_ERROR;
int32_t minIdPoolSize = TSDB_MAX_TABLES;
int32_t maxIdPoolSize = TSDB_MIN_TABLES;
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
SVgObj *pVgroup = pDb->vgList[v];
if (pVgroup == NULL) continue;
int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
minIdPoolSize = MIN(minIdPoolSize, idPoolSize);
maxIdPoolSize = MAX(maxIdPoolSize, idPoolSize);
}
// new vgroup
if (pInputVgroup->idPool == NULL) {
pInputVgroup->idPool = taosInitIdPool(maxIdPoolSize);
if (pInputVgroup->idPool == NULL) {
mError("vgId:%d, failed to init idPool for vgroup, size:%d", pInputVgroup->vgId, maxIdPoolSize);
return TSDB_CODE_MND_OUT_OF_MEMORY;
} else {
mDebug("vgId:%d, init idPool for vgroup, size:%d", pInputVgroup->vgId, maxIdPoolSize);
return TSDB_CODE_SUCCESS;
}
}
// realloc all vgroups in db
int32_t newIdPoolSize;
if (minIdPoolSize * 4 < TSDB_TABLES_STEP) {
newIdPoolSize = minIdPoolSize * 4;
} else {
newIdPoolSize = ((minIdPoolSize / TSDB_TABLES_STEP) + 1) * TSDB_TABLES_STEP;
}
if (newIdPoolSize > tsMaxTablePerVnode) {
if (minIdPoolSize >= tsMaxTablePerVnode) {
mError("db:%s, minIdPoolSize:%d newIdPoolSize:%d larger than maxTablesPerVnode:%d", pDb->name, minIdPoolSize, newIdPoolSize,
tsMaxTablePerVnode);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
} else {
newIdPoolSize = tsMaxTablePerVnode;
}
}
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
SVgObj *pVgroup = pDb->vgList[v];
if (pVgroup == NULL) continue;
int32_t oldIdPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
if (newIdPoolSize == oldIdPoolSize) continue;
if (taosUpdateIdPool(pVgroup->idPool, newIdPoolSize) < 0) {
mError("vgId:%d, failed to update idPoolSize from %d to %d", pVgroup->vgId, oldIdPoolSize, newIdPoolSize);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
} else {
mDebug("vgId:%d, idPoolSize update from %d to %d", pVgroup->vgId, oldIdPoolSize, newIdPoolSize);
}
}
return TSDB_CODE_SUCCESS;
}
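A worked example of the pool-growth rule implemented above, assuming stand-in values for TSDB_TABLES_STEP and tsMaxTablePerVnode (the real values come from the build configuration): small pools quadruple, larger pools round up to the next step, and the result is capped at the per-vnode maximum.

#include <stdio.h>

#define DEMO_TABLES_STEP       1000   /* assumed stand-in for TSDB_TABLES_STEP   */
#define DEMO_MAX_PER_VNODE  1000000   /* assumed stand-in for tsMaxTablePerVnode */

static int nextIdPoolSize(int minIdPoolSize) {
  int newSize;
  if (minIdPoolSize * 4 < DEMO_TABLES_STEP) {
    newSize = minIdPoolSize * 4;                                           /* grow fast while small */
  } else {
    newSize = ((minIdPoolSize / DEMO_TABLES_STEP) + 1) * DEMO_TABLES_STEP; /* then step-wise        */
  }
  if (newSize > DEMO_MAX_PER_VNODE) newSize = DEMO_MAX_PER_VNODE;          /* never exceed the cap  */
  return newSize;
}

int main(void) {
  int sizes[] = {100, 300, 1000, 4500};
  for (int i = 0; i < 4; ++i) {
    printf("min=%d -> next=%d\n", sizes[i], nextIdPoolSize(sizes[i]));
  }
  return 0;
}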
int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSid) {
SDbObj *pDb = pMsg->pDb;
pthread_mutex_lock(&pDb->mutex);
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
int vgIndex = (v + pDb->vgListIndex) % pDb->numOfVgroups;
SVgObj *pVgroup = pDb->vgList[vgIndex];
if (pVgroup == NULL) {
mError("db:%s, index:%d vgroup is null", pDb->name, vgIndex);
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_MND_APP_ERROR;
}
int32_t sid = taosAllocateId(pVgroup->idPool);
if (sid <= 0) {
mDebug("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId);
continue;
}
*pSid = sid;
*ppVgroup = pVgroup;
pDb->vgListIndex = vgIndex;
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_SUCCESS;
}
int maxVgroupsPerDb = tsMaxVgroupsPerDb;
if (maxVgroupsPerDb <= 0) {
maxVgroupsPerDb = mnodeGetOnlinDnodesCpuCoreNum();
maxVgroupsPerDb = MAX(maxVgroupsPerDb, 2);
}
if (pDb->numOfVgroups < maxVgroupsPerDb) {
mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle, pMsg,
pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
pthread_mutex_unlock(&pDb->mutex);
int32_t code = mnodeCreateVgroup(pMsg);
if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return code;
}
SVgObj *pVgroup = pDb->vgList[0];
if (pVgroup == NULL) return TSDB_CODE_MND_NO_ENOUGH_DNODES;
int32_t code = mnodeAllocVgroupIdPool(pVgroup);
if (code != TSDB_CODE_SUCCESS) {
pthread_mutex_unlock(&pDb->mutex);
return code;
}
int32_t sid = taosAllocateId(pVgroup->idPool);
if (sid <= 0) {
mError("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId);
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
}
*pSid = sid;
*ppVgroup = pVgroup;
pDb->vgListIndex = 0;
pthread_mutex_unlock(&pDb->mutex);
return TSDB_CODE_SUCCESS;
} }
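A condensed sketch of the allocation strategy above, with hypothetical types: the scan starts from the last successful index under the db mutex, keeps filling the same vgroup while it still has free slots, and falls back to the caller's create-vgroup path once every pool is exhausted.

#include <pthread.h>
#include <stdio.h>

#define NVG 3

typedef struct {
  pthread_mutex_t mutex;
  int vgListIndex;          /* where the next scan starts          */
  int freeSlots[NVG];       /* stand-in for each vgroup's id pool  */
} DemoDb;

static int allocSlot(DemoDb *db, int *vgIndexOut) {
  pthread_mutex_lock(&db->mutex);
  for (int v = 0; v < NVG; ++v) {
    int idx = (v + db->vgListIndex) % NVG;
    if (db->freeSlots[idx] > 0) {
      db->freeSlots[idx]--;
      db->vgListIndex = idx;   /* keep filling the same vgroup next time */
      pthread_mutex_unlock(&db->mutex);
      *vgIndexOut = idx;
      return 0;
    }
  }
  pthread_mutex_unlock(&db->mutex);
  return -1;                   /* caller would create a new vgroup here  */
}

int main(void) {
  DemoDb db = { PTHREAD_MUTEX_INITIALIZER, 0, {1, 2, 0} };
  int vg;
  while (allocSlot(&db, &vg) == 0) printf("allocated in vgroup %d\n", vg);
  return 0;
}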
void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) { void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
@ -378,8 +476,9 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
return TSDB_CODE_MND_ACTION_IN_PROGRESS; return TSDB_CODE_MND_ACTION_IN_PROGRESS;
} }
int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) { int32_t mnodeCreateVgroup(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
SDbObj *pDb = pMsg->pDb;
SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj)); SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj));
tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN); tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN);
@ -407,7 +506,7 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
int32_t code = sdbInsertRow(&oper); int32_t code = sdbInsertRow(&oper);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
pMsg->pVgroup = NULL; pMsg->pVgroup = NULL;
tfree(pVgroup); mnodeDestroyVgroup(pVgroup);
} else { } else {
code = TSDB_CODE_MND_ACTION_IN_PROGRESS; code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
} }
@ -461,29 +560,27 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
pSchema[cols].bytes = htons(pShow->bytes[cols]); pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++; cols++;
int32_t maxReplica = 0; pShow->bytes[cols] = 4;
SVgObj *pVgroup = NULL; pSchema[cols].type = TSDB_DATA_TYPE_INT;
STableObj *pTable = NULL; strcpy(pSchema[cols].name, "poolSize");
if (pShow->payloadLen > 0 ) { pSchema[cols].bytes = htons(pShow->bytes[cols]);
pTable = mnodeGetTable(pShow->payload); cols++;
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
mnodeDecTableRef(pTable); pShow->bytes[cols] = 4;
return TSDB_CODE_MND_INVALID_TABLE_NAME; pSchema[cols].type = TSDB_DATA_TYPE_INT;
} strcpy(pSchema[cols].name, "maxTables");
mnodeDecTableRef(pTable); pSchema[cols].bytes = htons(pShow->bytes[cols]);
pVgroup = mnodeGetVgroup(((SChildTableObj*)pTable)->vgId); cols++;
if (NULL == pVgroup) return TSDB_CODE_MND_INVALID_TABLE_NAME;
mnodeDecVgroupRef(pVgroup); pShow->maxReplica = 1;
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
} else { SVgObj *pVgroup = pDb->vgList[v];
SVgObj *pVgroup = pDb->pHead; if (pVgroup != NULL) {
while (pVgroup != NULL) { pShow->maxReplica = pVgroup->numOfVnodes > pShow->maxReplica ? pVgroup->numOfVnodes : pShow->maxReplica;
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
pVgroup = pVgroup->next;
} }
} }
for (int32_t i = 0; i < maxReplica; ++i) { for (int32_t i = 0; i < pShow->maxReplica; ++i) {
pShow->bytes[cols] = 2; pShow->bytes[cols] = 2;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
strcpy(pSchema[cols].name, "dnode"); strcpy(pSchema[cols].name, "dnode");
@ -507,27 +604,33 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
pShow->numOfColumns = cols; pShow->numOfColumns = cols;
pShow->offset[0] = 0; pShow->offset[0] = 0;
for (int32_t i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; for (int32_t i = 1; i < cols; ++i) {
pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
if (NULL == pTable) {
pShow->numOfRows = pDb->numOfVgroups;
pShow->pIter = pDb->pHead;
} else {
pShow->numOfRows = 1;
pShow->pIter = pVgroup;
} }
mnodeDecDbRef(pDb); pShow->numOfRows = pDb->numOfVgroups;
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
mnodeDecDbRef(pDb);
return 0; return 0;
} }
static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) {
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
return true;
}
SChildTableObj *pCTable = (SChildTableObj *)pTable;
if (pVgroup->vgId == pCTable->vgId) {
return true;
} else {
return false;
}
}
int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) { int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
int32_t numOfRows = 0; int32_t numOfRows = 0;
SVgObj *pVgroup = NULL; SVgObj *pVgroup = NULL;
int32_t maxReplica = 0;
int32_t cols = 0; int32_t cols = 0;
char * pWrite; char * pWrite;
@ -539,16 +642,16 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
return 0; return 0;
} }
pVgroup = pDb->pHead; STableObj *pTable = NULL;
while (pVgroup != NULL) { if (pShow->payloadLen > 0 ) {
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; pTable = mnodeGetTable(pShow->payload);
pVgroup = pVgroup->next;
} }
while (numOfRows < rows) { while (numOfRows < rows) {
pVgroup = (SVgObj *) pShow->pIter; pShow->pIter = mnodeGetNextVgroup(pShow->pIter, &pVgroup);
if (pVgroup == NULL) break; if (pVgroup == NULL) break;
pShow->pIter = (void *) pVgroup->next; if (pVgroup->pDb != pDb) continue;
if (!mnodeFilterVgroups(pVgroup, pTable)) continue;
cols = 0; cols = 0;
@ -560,7 +663,15 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
*(int32_t *) pWrite = pVgroup->numOfTables; *(int32_t *) pWrite = pVgroup->numOfTables;
cols++; cols++;
for (int32_t i = 0; i < maxReplica; ++i) { pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = taosIdPoolMaxSize(pVgroup->idPool);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = tsMaxTablePerVnode;
cols++;
for (int32_t i = 0; i < pShow->maxReplica; ++i) {
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId; *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
cols++; cols++;
@ -588,38 +699,36 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
} }
} }
mnodeDecVgroupRef(pVgroup);
numOfRows++; numOfRows++;
} }
pShow->numOfReads += numOfRows; pShow->numOfReads += numOfRows;
mnodeDecTableRef(pTable);
mnodeDecDbRef(pDb); mnodeDecDbRef(pDb);
return numOfRows; return numOfRows;
} }
void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1 && pVgroup->tableList[pTable->sid - 1] == NULL) { int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
pVgroup->tableList[pTable->sid - 1] = pTable; if (pTable->sid > idPoolSize) {
mnodeAllocVgroupIdPool(pVgroup);
}
if (pTable->sid >= 1) {
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid); taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables++; pVgroup->numOfTables++;
}
if (pVgroup->numOfTables >= pVgroup->pDb->cfg.maxTables) {
mnodeMoveVgroupToTail(pVgroup);
}
mnodeIncVgroupRef(pVgroup); mnodeIncVgroupRef(pVgroup);
}
} }
void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1 && pVgroup->tableList[pTable->sid - 1] != NULL) { if (pTable->sid >= 1) {
pVgroup->tableList[pTable->sid - 1] = NULL;
taosFreeId(pVgroup->idPool, pTable->sid); taosFreeId(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables--; pVgroup->numOfTables--;
}
mnodeMoveVgroupToHead(pVgroup);
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
}
} }
SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) { SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
@ -630,13 +739,16 @@ SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
if (pVnode == NULL) return NULL; if (pVnode == NULL) return NULL;
strcpy(pVnode->db, pVgroup->dbName); strcpy(pVnode->db, pVgroup->dbName);
int32_t maxTables = taosIdPoolMaxSize(pVgroup->idPool);
//TODO: dynamic alloc tables in tsdb
maxTables = MAX(10000, tsMaxTablePerVnode);
SMDVnodeCfg *pCfg = &pVnode->cfg; SMDVnodeCfg *pCfg = &pVnode->cfg;
pCfg->vgId = htonl(pVgroup->vgId); pCfg->vgId = htonl(pVgroup->vgId);
pCfg->cfgVersion = htonl(pDb->cfgVersion); pCfg->cfgVersion = htonl(pDb->cfgVersion);
pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize); pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize);
pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks); pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks);
pCfg->maxTables = htonl(pDb->cfg.maxTables + 1); pCfg->maxTables = htonl(maxTables + 1);
pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile); pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile);
pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep); pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep);
pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1); pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1);
@ -857,9 +969,10 @@ void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
sdbFreeIter(pIter); sdbFreeIter(pIter);
mInfo("dnode:%d, all vgroups is dropped from sdb", pDropDnode->dnodeId); mInfo("dnode:%d, all vgroups:%d is dropped from sdb", pDropDnode->dnodeId, numOfVgroups);
} }
#if 0
void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) { void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
void * pIter = NULL; void * pIter = NULL;
SVgObj *pVgroup = NULL; SVgObj *pVgroup = NULL;
@ -881,6 +994,7 @@ void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
mInfo("db:%s, all vgroups is updated in sdb", pAlterDb->name); mInfo("db:%s, all vgroups is updated in sdb", pAlterDb->name);
} }
#endif
void mnodeDropAllDbVgroups(SDbObj *pDropDb) { void mnodeDropAllDbVgroups(SDbObj *pDropDb) {
void * pIter = NULL; void * pIter = NULL;

View File

@ -61,6 +61,9 @@
#define HTTP_CHECK_BODY_CONTINUE 0 #define HTTP_CHECK_BODY_CONTINUE 0
#define HTTP_CHECK_BODY_SUCCESS 1 #define HTTP_CHECK_BODY_SUCCESS 1
#define HTTP_READ_DATA_SUCCESS 0
#define HTTP_READ_DATA_FAILED 1
#define HTTP_WRITE_RETRY_TIMES 500 #define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5 #define HTTP_WRITE_WAIT_TIME_MS 5
#define HTTP_EXPIRED_TIME 60000 #define HTTP_EXPIRED_TIME 60000

View File

@ -23,6 +23,6 @@ void httpCleanUpConnect();
void *httpInitServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle); void *httpInitServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle);
void httpCleanUpServer(HttpServer *pServer); void httpCleanUpServer(HttpServer *pServer);
bool httpReadDataImp(HttpContext *pContext); int httpReadDataImp(HttpContext *pContext);
#endif #endif

View File

@ -60,6 +60,7 @@ bool httpParseURL(HttpContext* pContext) {
char* pSeek; char* pSeek;
char* pEnd = strchr(pParser->pLast, ' '); char* pEnd = strchr(pParser->pLast, ' ');
if (pEnd == NULL) { if (pEnd == NULL) {
httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL);
return false; return false;
} }
@ -275,14 +276,14 @@ bool httpParseChunkedBody(HttpContext* pContext, HttpParser* pParser, bool test)
return true; return true;
} }
bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { int httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) {
bool parsedOk = httpParseChunkedBody(pContext, pParser, true); bool parsedOk = httpParseChunkedBody(pContext, pParser, true);
if (parsedOk) { if (parsedOk) {
httpParseChunkedBody(pContext, pParser, false); httpParseChunkedBody(pContext, pParser, false);
return HTTP_CHECK_BODY_SUCCESS; return HTTP_CHECK_BODY_SUCCESS;
} else { } else {
httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
if (!httpReadDataImp(pContext)) { if (httpReadDataImp(pContext) != HTTP_READ_DATA_SUCCESS) {
httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr);
return HTTP_CHECK_BODY_ERROR; return HTTP_CHECK_BODY_ERROR;
} else { } else {
@ -296,7 +297,6 @@ int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) {
if (dataReadLen > pParser->data.len) { if (dataReadLen > pParser->data.len) {
httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, read size:%d dataReadLen:%d > pContext->data.len:%d", httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, read size:%d dataReadLen:%d > pContext->data.len:%d",
pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len);
httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR);
return HTTP_CHECK_BODY_ERROR; return HTTP_CHECK_BODY_ERROR;
} else if (dataReadLen < pParser->data.len) { } else if (dataReadLen < pParser->data.len) {
httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read", httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
@ -358,20 +358,13 @@ bool httpParseRequest(HttpContext* pContext) {
} }
int httpCheckReadCompleted(HttpContext* pContext) { int httpCheckReadCompleted(HttpContext* pContext) {
HttpParser *pParser = &pContext->parser; HttpParser* pParser = &pContext->parser;
if (pContext->httpChunked == HTTP_UNCUNKED) {
int ret = httpReadUnChunkedBody(pContext, pParser);
if (ret != HTTP_CHECK_BODY_SUCCESS) {
return ret;
}
} else {
int ret = httpReadChunkedBody(pContext, pParser);
if (ret != HTTP_CHECK_BODY_SUCCESS) {
return ret;
}
}
return HTTP_CHECK_BODY_SUCCESS; if (pContext->httpChunked == HTTP_UNCUNKED) {
return httpReadUnChunkedBody(pContext, pParser);
} else {
return httpReadChunkedBody(pContext, pParser);
}
} }
bool httpDecodeRequest(HttpContext* pContext) { bool httpDecodeRequest(HttpContext* pContext) {

View File

@ -69,7 +69,7 @@ void httpCleanUpConnect() {
httpDebug("http server:%s is cleaned up", pServer->label); httpDebug("http server:%s is cleaned up", pServer->label);
} }
bool httpReadDataImp(HttpContext *pContext) { int httpReadDataImp(HttpContext *pContext) {
HttpParser *pParser = &pContext->parser; HttpParser *pParser = &pContext->parser;
while (pParser->bufsize <= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) { while (pParser->bufsize <= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) {
@ -85,8 +85,7 @@ bool httpReadDataImp(HttpContext *pContext) {
} else { } else {
httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect", httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect",
pContext, pContext->fd, pContext->ipstr, errno); pContext, pContext->fd, pContext->ipstr, errno);
httpReleaseContext(pContext); return HTTP_READ_DATA_FAILED;
return false;
} }
} else { } else {
pParser->bufsize += nread; pParser->bufsize += nread;
@ -95,15 +94,13 @@ bool httpReadDataImp(HttpContext *pContext) {
if (pParser->bufsize >= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) { if (pParser->bufsize >= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) {
httpError("context:%p, fd:%d, ip:%s, thread:%s, request big than:%d", httpError("context:%p, fd:%d, ip:%s, thread:%s, request big than:%d",
pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, HTTP_BUFFER_SIZE); pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, HTTP_BUFFER_SIZE);
httpSendErrorResp(pContext, HTTP_REQUSET_TOO_BIG); return HTTP_REQUSET_TOO_BIG;
httpNotifyContextClose(pContext);
return false;
} }
} }
pParser->buffer[pParser->bufsize] = 0; pParser->buffer[pParser->bufsize] = 0;
return true; return HTTP_READ_DATA_SUCCESS;
} }
static bool httpDecompressData(HttpContext *pContext) { static bool httpDecompressData(HttpContext *pContext) {
@ -141,8 +138,14 @@ static bool httpReadData(HttpContext *pContext) {
httpInitContext(pContext); httpInitContext(pContext);
} }
if (!httpReadDataImp(pContext)) { int32_t code = httpReadDataImp(pContext);
if (code != HTTP_READ_DATA_SUCCESS) {
if (code == HTTP_READ_DATA_FAILED) {
httpReleaseContext(pContext);
} else {
httpSendErrorResp(pContext, code);
httpNotifyContextClose(pContext); httpNotifyContextClose(pContext);
}
return false; return false;
} }
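The hunks above change httpReadDataImp from acting on errors to merely reporting them, leaving the single caller to map the code to a cleanup action. A minimal sketch of that split, with placeholder codes and handlers rather than the http module's real ones:

#include <stdio.h>

enum { READ_OK = 0, READ_FAILED = 1, READ_TOO_BIG = 2 };

static int readData(int simulate) { return simulate; }     /* pretend socket read */

static void onReadError(int code) {
  if (code == READ_FAILED) {
    printf("release context\n");                  /* peer gone, nothing to answer */
  } else {
    printf("send error %d, then close\n", code);  /* protocol-level error reply   */
  }
}

int main(void) {
  for (int sim = READ_OK; sim <= READ_TOO_BIG; ++sim) {
    int code = readData(sim);
    if (code != READ_OK) onReadError(code);
    else printf("request complete\n");
  }
  return 0;
}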

View File

@ -233,10 +233,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num
} }
} }
void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) { void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
HttpContext *pContext = (HttpContext *)param; HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return; if (pContext == NULL) return;
int32_t code = taos_errno(result);
HttpEncodeMethod *encode = pContext->encodeMethod; HttpEncodeMethod *encode = pContext->encodeMethod;
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
@ -260,8 +261,8 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) {
return; return;
} }
int num_fields = taos_field_count(result); bool isUpdate = tscIsUpdateQuery(result);
if (num_fields == 0) { if (isUpdate) {
// not select or show commands // not select or show commands
int affectRows = taos_affected_rows(result); int affectRows = taos_affected_rows(result);

View File

@ -192,7 +192,6 @@ typedef struct SQInfo {
int32_t offset; // offset in group result set of subgroup, todo refactor int32_t offset; // offset in group result set of subgroup, todo refactor
SArray* arrTableIdInfo; SArray* arrTableIdInfo;
T_REF_DECLARE()
/* /*
* the query is executed position on which meter of the whole list. * the query is executed position on which meter of the whole list.
* when the index reaches the last one of the list, it means the query is completed. * when the index reaches the last one of the list, it means the query is completed.
@ -201,8 +200,6 @@ typedef struct SQInfo {
*/ */
int32_t tableIndex; int32_t tableIndex;
int32_t numOfGroupResultPages; int32_t numOfGroupResultPages;
_qinfo_free_fn_t freeFn; //todo remove it
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables; void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
} SQInfo; } SQInfo;

View File

@ -107,7 +107,7 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order);
STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete); STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete);
STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder); STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder);
void* tsBufDestory(STSBuf* pTSBuf); void* tsBufDestroy(STSBuf* pTSBuf);
void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len); void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len);
int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx); int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx);

View File

@ -44,7 +44,7 @@
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN) #define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
#define GET_QINFO_ADDR(x) ((void *)((char *)(x)-offsetof(SQInfo, runtimeEnv))) #define GET_QINFO_ADDR(x) ((SQInfo *)((char *)(x)-offsetof(SQInfo, runtimeEnv)))
#define GET_COL_DATA_POS(query, index, step) ((query)->pos + (index) * (step)) #define GET_COL_DATA_POS(query, index, step) ((query)->pos + (index) * (step))
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
@ -120,6 +120,7 @@ static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) {
#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) #define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
static void setQueryStatus(SQuery *pQuery, int8_t status); static void setQueryStatus(SQuery *pQuery, int8_t status);
static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
@ -351,27 +352,6 @@ static bool hasTagValOutput(SQuery* pQuery) {
return false; return false;
} }
static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) {
// for a tag column, no corresponding field info
SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo;
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
return NULL;
}
/*
* Choose the right column field info by field id, since the file block may be out of date,
* which means the newest table schema is not equalled to the schema of this block.
* TODO: speedup by using bsearch
*/
for (int32_t i = 0; i < numOfCols; ++i) {
if (pColIndex->colId == pStatis[i].colId) {
return &pStatis[i];
}
}
return NULL;
}
/** /**
* @param pQuery * @param pQuery
* @param col * @param col
@ -380,19 +360,14 @@ static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t
* @param pColStatis * @param pColStatis
* @return * @return
*/ */
static bool hasNullValue(SQuery *pQuery, int32_t col, int32_t numOfCols, SDataStatis *pStatis, SDataStatis **pColStatis) { static bool hasNullValue(SColIndex* pColIndex, SDataStatis *pStatis, SDataStatis **pColStatis) {
SColIndex *pColIndex = &pQuery->pSelectExpr[col].base.colInfo; if (TSDB_COL_IS_TAG(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
return false;
}
// query on primary timestamp column, not null value at all
if (pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return false; return false;
} }
if (pStatis != NULL) { if (pStatis != NULL) {
*pColStatis = getStatisInfo(pQuery, pStatis, numOfCols, col); *pColStatis = &pStatis[pColIndex->colIndex];
assert((*pColStatis)->colId == pColIndex->colId);
} else { } else {
*pColStatis = NULL; *pColStatis = NULL;
} }
@ -842,8 +817,8 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
if (pDataBlock == NULL) { if (pDataBlock == NULL) {
return NULL; return NULL;
} }
char *dataBlock = NULL;
char *dataBlock = NULL;
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
@ -864,6 +839,7 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
sas->data = calloc(pQuery->numOfCols, POINTER_BYTES); sas->data = calloc(pQuery->numOfCols, POINTER_BYTES);
if (sas->data == NULL) { if (sas->data == NULL) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
} }
@ -887,10 +863,14 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
} else { // other type of query function } else { // other type of query function
SColIndex *pCol = &pQuery->pSelectExpr[col].base.colInfo; SColIndex *pCol = &pQuery->pSelectExpr[col].base.colInfo;
if (TSDB_COL_IS_TAG(pCol->flag) || pDataBlock == NULL) { if (TSDB_COL_IS_TAG(pCol->flag)) {
dataBlock = NULL; dataBlock = NULL;
} else { } else {
dataBlock = getDataBlockImpl(pDataBlock, pCol->colId); SColIndex* pColIndex = &pQuery->pSelectExpr[col].base.colInfo;
SColumnInfoData *p = taosArrayGet(pDataBlock, pColIndex->colIndex);
assert(p->info.colId == pColIndex->colId);
dataBlock = p->pData;
} }
} }
@ -922,6 +902,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport));
if (sasArray == NULL) { if (sasArray == NULL) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
} }
@ -1168,6 +1149,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport));
if (sasArray == NULL) { if (sasArray == NULL) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
} }
@ -1365,7 +1347,7 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
int32_t colId = pQuery->pSelectExpr[colIndex].base.colInfo.colId; int32_t colId = pQuery->pSelectExpr[colIndex].base.colInfo.colId;
SDataStatis *tpField = NULL; SDataStatis *tpField = NULL;
pCtx->hasNull = hasNullValue(pQuery, colIndex, pBlockInfo->numOfCols, pStatis, &tpField); pCtx->hasNull = hasNullValue(&pQuery->pSelectExpr[colIndex].base.colInfo, pStatis, &tpField);
pCtx->aInputElemBuf = inputData; pCtx->aInputElemBuf = inputData;
if (tpField != NULL) { if (tpField != NULL) {
@ -1619,22 +1601,21 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle); tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle);
tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle);
pRuntimeEnv->pTSBuf = tsBufDestory(pRuntimeEnv->pTSBuf); pRuntimeEnv->pTSBuf = tsBufDestroy(pRuntimeEnv->pTSBuf);
} }
static bool isQueryKilled(SQInfo *pQInfo) { #define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED)
return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED);
}
static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; } static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; }
static bool isFixedOutputQuery(SQuery *pQuery) { static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) {
if (pQuery->intervalTime != 0) { SQuery* pQuery = pRuntimeEnv->pQuery;
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
return false; return false;
} }
// Note:top/bottom query is fixed output query // Note:top/bottom query is fixed output query
if (isTopBottomQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { if (pRuntimeEnv->topBotQuery || pRuntimeEnv->groupbyNormalCol) {
return true; return true;
} }
@ -2066,8 +2047,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
} }
if (r == BLK_DATA_NO_NEEDED) { if (r == BLK_DATA_NO_NEEDED) {
qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), qDebug("QInfo:%p data block discard, rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->rows);
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pRuntimeEnv->summary.discardBlocks += 1; pRuntimeEnv->summary.discardBlocks += 1;
} else if (r == BLK_DATA_STATIS_NEEDED) { } else if (r == BLK_DATA_STATIS_NEEDED) {
if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
@ -2199,7 +2179,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa
static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
// in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
SQuery* pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pQuery)) { if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pRuntimeEnv)) {
SResultRec *pRec = &pQuery->rec; SResultRec *pRec = &pQuery->rec;
if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) {
@ -2249,8 +2229,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) { while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1; summary->totalBlocks += 1;
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return 0; if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
@ -3304,8 +3286,9 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
cond.twindow.skey, cond.twindow.ekey); cond.twindow.skey, cond.twindow.ekey);
// check if query is killed or not // check if query is killed or not
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
return; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
} }
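Both kill checks above now unwind through longjmp after cleaning up partial results, instead of returning silently from deep inside the scan. A self-contained sketch of that cancellation pattern, with an invented error code and jump environment:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;
static volatile int killed = 0;
#define CODE_CANCELLED 99

static void scanOneBlock(int blockNo) {
  if (killed) {
    printf("block %d: cleaning up partial results\n", blockNo);  /* finalize step */
    longjmp(env, CODE_CANCELLED);                                /* back to setjmp */
  }
  printf("block %d scanned\n", blockNo);
}

int main(void) {
  int code = setjmp(env);
  if (code != 0) {
    printf("query aborted, code=%d\n", code);
    return 0;
  }
  for (int i = 0; i < 5; ++i) {
    if (i == 2) killed = 1;     /* another thread would set this flag */
    scanOneBlock(i);
  }
  return 0;
}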
@ -3695,11 +3678,13 @@ void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo) {
assert(pQuery->rec.rows <= pQuery->rec.capacity); assert(pQuery->rec.rows <= pQuery->rec.capacity);
} }
static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) { static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery; SQuery *pQuery = pRuntimeEnv->pQuery;
// update the number of result for each, only update the number of rows for the corresponding window result. // update the number of result for each, only update the number of rows for the corresponding window result.
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
return;
}
for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) {
SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i]; SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i];
@ -3713,7 +3698,6 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryIn
pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes); pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
} }
} }
}
} }
void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis, void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis,
@ -3729,8 +3713,6 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *
} else { } else {
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
} }
updateWindowResNumOfRes(pRuntimeEnv, pTableQueryInfo);
} }
bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) {
@ -3950,8 +3932,9 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) { while (tsdbNextDataBlock(pQueryHandle)) {
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) {
return; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
@ -4099,7 +4082,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
return; return;
} }
if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pQuery))) { if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pRuntimeEnv))) {
return; return;
} }
@ -4115,7 +4098,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
&& (cond.order == TSDB_ORDER_ASC) && (cond.order == TSDB_ORDER_ASC)
&& (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!QUERY_IS_INTERVAL_QUERY(pQuery))
&& (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) && (!isGroupbyNormalCol(pQuery->pGroupbyExpr))
&& (!isFixedOutputQuery(pQuery)) && (!isFixedOutputQuery(pRuntimeEnv))
) { ) {
SArray* pa = GET_TABLEGROUP(pQInfo, 0); SArray* pa = GET_TABLEGROUP(pQInfo, 0);
STableQueryInfo* pCheckInfo = taosArrayGetP(pa, 0); STableQueryInfo* pCheckInfo = taosArrayGetP(pa, 0);
@ -4158,7 +4141,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery; SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
pQuery->precision = tsdbGetCfg(tsdb)->precision; pQuery->precision = tsdbGetCfg(tsdb)->precision;
pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery);
setScanLimitationByResultBuffer(pQuery); setScanLimitationByResultBuffer(pQuery);
changeExecuteScanOrder(pQInfo, false); changeExecuteScanOrder(pQInfo, false);
@ -4236,10 +4222,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
pQuery->fillType, pColInfo); pQuery->fillType, pColInfo);
} }
// todo refactor
pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery);
pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery);
setQueryStatus(pQuery, QUERY_NOT_COMPLETED); setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -4267,8 +4249,9 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
while (tsdbNextDataBlock(pQueryHandle)) { while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1; summary->totalBlocks += 1;
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
break; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);
@ -4304,6 +4287,8 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey); pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey);
} }
updateWindowResNumOfRes(pRuntimeEnv);
int64_t et = taosGetTimestampMs(); int64_t et = taosGetTimestampMs();
return et - st; return et - st;
} }
@ -4316,7 +4301,9 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
SArray *group = GET_TABLEGROUP(pQInfo, 0); SArray *group = GET_TABLEGROUP(pQInfo, 0);
STableQueryInfo* pCheckInfo = taosArrayGetP(group, index); STableQueryInfo* pCheckInfo = taosArrayGetP(group, index);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) {
setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb);
}
STableId* id = TSDB_TABLEID(pCheckInfo->pTable); STableId* id = TSDB_TABLEID(pCheckInfo->pTable);
qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index, qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index,
@ -4549,8 +4536,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
1 == taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList)); 1 == taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList));
while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) { while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) {
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
return; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
pQuery->current = taosArrayGetP(group, pQInfo->tableIndex); pQuery->current = taosArrayGetP(group, pQInfo->tableIndex);
@ -4744,9 +4732,10 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
qDebug("QInfo:%p master scan completed, elapsed time: %" PRId64 "ms, reverse scan start", pQInfo, el); qDebug("QInfo:%p master scan completed, elapsed time: %" PRId64 "ms, reverse scan start", pQInfo, el);
// query error occurred or query is killed, abort current execution // query error occurred or query is killed, abort current execution
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code));
return; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
// close all time window results // close all time window results
@ -4766,9 +4755,10 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
setQueryStatus(pQuery, QUERY_COMPLETED); setQueryStatus(pQuery, QUERY_COMPLETED);
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code));
return; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) { if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) {
@ -4806,8 +4796,9 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey); scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey);
finalizeQueryResult(pRuntimeEnv); finalizeQueryResult(pRuntimeEnv);
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
return; finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
} }
// since the numOfRows must be identical for all sql functions that are allowed to be executed simultaneously. // since the numOfRows must be identical for all sql functions that are allowed to be executed simultaneously.
@ -4839,10 +4830,6 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey); scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
finalizeQueryResult(pRuntimeEnv); finalizeQueryResult(pRuntimeEnv);
if (isQueryKilled(pQInfo)) {
return;
}
pQuery->rec.rows = getNumOfResult(pRuntimeEnv); pQuery->rec.rows = getNumOfResult(pRuntimeEnv);
if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols > 0 && pQuery->rec.rows > 0) { if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols > 0 && pQuery->rec.rows > 0) {
skipResults(pRuntimeEnv); skipResults(pRuntimeEnv);
@ -4887,10 +4874,6 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start)
while (1) { while (1) {
scanOneTableDataBlocks(pRuntimeEnv, start); scanOneTableDataBlocks(pRuntimeEnv, start);
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return;
}
assert(!Q_STATUS_EQUAL(pQuery->status, QUERY_NOT_COMPLETED)); assert(!Q_STATUS_EQUAL(pQuery->status, QUERY_NOT_COMPLETED));
finalizeQueryResult(pRuntimeEnv); finalizeQueryResult(pRuntimeEnv);
@ -5024,7 +5007,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
// group by normal column, sliding window query, interval query are handled by interval query processor // group by normal column, sliding window query, interval query are handled by interval query processor
if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { // interval (down sampling operation) if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { // interval (down sampling operation)
tableIntervalProcess(pQInfo, item); tableIntervalProcess(pQInfo, item);
} else if (isFixedOutputQuery(pQuery)) { } else if (isFixedOutputQuery(pRuntimeEnv)) {
tableFixedOutputProcess(pQInfo, item); tableFixedOutputProcess(pQInfo, item);
} else { // diff/add/multiply/subtract/division } else { // diff/add/multiply/subtract/division
assert(pQuery->checkBuffer == 1); assert(pQuery->checkBuffer == 1);
@ -5044,7 +5027,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
int64_t st = taosGetTimestampUs(); int64_t st = taosGetTimestampUs();
if (QUERY_IS_INTERVAL_QUERY(pQuery) || if (QUERY_IS_INTERVAL_QUERY(pQuery) ||
(isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol && (isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol &&
!isFirstLastRowQuery(pQuery))) { !isFirstLastRowQuery(pQuery))) {
multiTableQueryProcess(pQInfo); multiTableQueryProcess(pQInfo);
} else { } else {
@ -5811,7 +5794,7 @@ static bool isValidQInfo(void *param) {
return (sig == (uint64_t)pQInfo); return (sig == (uint64_t)pQInfo);
} }
static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable, void* param, _qinfo_free_fn_t fn) { static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable, void* param) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery; SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@ -5836,7 +5819,6 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
} }
pQInfo->param = param; pQInfo->param = param;
pQInfo->freeFn = fn;
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo); qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
@ -6038,8 +6020,7 @@ typedef struct SQueryMgmt {
pthread_mutex_t lock; pthread_mutex_t lock;
} SQueryMgmt; } SQueryMgmt;
int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, _qinfo_free_fn_t fn, int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, qinfo_t* pQInfo) {
qinfo_t* pQInfo) {
assert(pQueryMsg != NULL && tsdb != NULL); assert(pQueryMsg != NULL && tsdb != NULL);
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -6129,7 +6110,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
goto _over; goto _over;
} }
code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery, param, fn); code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery, param);
_over: _over:
free(tagCond); free(tagCond);
@ -6153,43 +6134,25 @@ _over:
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
*pQInfo = NULL; *pQInfo = NULL;
} else { } else {
SQInfo* pq = (SQInfo*) (*pQInfo); // SQInfo* pq = (SQInfo*) (*pQInfo);
T_REF_INC(pq); // T_REF_INC(pq);
T_REF_INC(pq); // T_REF_INC(pq);
} }
// if failed to add ref for all meters in this query, abort current query // if failed to add ref for all meters in this query, abort current query
return code; return code;
} }
static void doDestoryQueryInfo(SQInfo* pQInfo) {
assert(pQInfo != NULL);
qDebug("QInfo:%p query completed", pQInfo);
queryCostStatis(pQInfo); // print the query cost summary
freeQInfo(pQInfo);
}
void qDestroyQueryInfo(qinfo_t qHandle) { void qDestroyQueryInfo(qinfo_t qHandle) {
SQInfo* pQInfo = (SQInfo*) qHandle; SQInfo* pQInfo = (SQInfo*) qHandle;
if (!isValidQInfo(pQInfo)) { if (!isValidQInfo(pQInfo)) {
return; return;
} }
int32_t ref = T_REF_DEC(pQInfo); qDebug("QInfo:%p query completed", pQInfo);
qDebug("QInfo:%p dec refCount, value:%d", pQInfo, ref); queryCostStatis(pQInfo); // print the query cost summary
freeQInfo(pQInfo);
if (ref == 0) {
_qinfo_free_fn_t freeFp = pQInfo->freeFn;
void* param = pQInfo->param;
doDestoryQueryInfo(pQInfo);
if (freeFp != NULL) {
assert(param != NULL);
freeFp(param);
}
}
} }
void qTableQuery(qinfo_t qinfo) { void qTableQuery(qinfo_t qinfo) {
@ -6200,31 +6163,24 @@ void qTableQuery(qinfo_t qinfo) {
return; return;
} }
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p it is already killed, abort", pQInfo); qDebug("QInfo:%p it is already killed, abort", pQInfo);
sem_post(&pQInfo->dataReady); sem_post(&pQInfo->dataReady);
qDestroyQueryInfo(pQInfo);
return; return;
} }
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table exists for query, abort", pQInfo); qDebug("QInfo:%p no table exists for query, abort", pQInfo);
sem_post(&pQInfo->dataReady); sem_post(&pQInfo->dataReady);
qDestroyQueryInfo(pQInfo);
return; return;
} }
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
// error occurs, record the error code and return to client // error occurs, record the error code and return to client
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
pQInfo->code = ret; pQInfo->code = ret;
qDebug("QInfo:%p query abort due to error occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code));
sem_post(&pQInfo->dataReady); sem_post(&pQInfo->dataReady);
qDestroyQueryInfo(pQInfo);
return; return;
} }
@ -6241,7 +6197,7 @@ void qTableQuery(qinfo_t qinfo) {
} }
SQuery* pQuery = pRuntimeEnv->pQuery; SQuery* pQuery = pRuntimeEnv->pQuery;
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p query is killed", pQInfo); qDebug("QInfo:%p query is killed", pQInfo);
} else if (pQuery->rec.rows == 0) { } else if (pQuery->rec.rows == 0) {
qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total);
@ -6251,7 +6207,6 @@ void qTableQuery(qinfo_t qinfo) {
} }
sem_post(&pQInfo->dataReady); sem_post(&pQInfo->dataReady);
qDestroyQueryInfo(pQInfo);
} }
int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) { int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
@ -6262,7 +6217,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
} }
SQuery *pQuery = pQInfo->runtimeEnv.pQuery; SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
if (isQueryKilled(pQInfo)) { if (IS_QUERY_KILLED(pQInfo)) {
qDebug("QInfo:%p query is killed, code:%d", pQInfo, pQInfo->code); qDebug("QInfo:%p query is killed, code:%d", pQInfo, pQInfo->code);
return pQInfo->code; return pQInfo->code;
} }
@ -6295,7 +6250,7 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo) {
} }
if (ret) { if (ret) {
T_REF_INC(pQInfo); // T_REF_INC(pQInfo);
qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); qDebug("QInfo:%p has more results waits for client retrieve", pQInfo);
} }
@ -6337,7 +6292,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
code = pQInfo->code; code = pQInfo->code;
} }
if (isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) {
(*pRsp)->completed = 1; // notify no more result to client (*pRsp)->completed = 1; // notify no more result to client
} }
@ -6352,7 +6307,6 @@ int32_t qKillQuery(qinfo_t qinfo) {
} }
setQueryKilled(pQInfo); setQueryKilled(pQInfo);
qDestroyQueryInfo(pQInfo);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -6497,6 +6451,7 @@ void freeqinfoFn(void *qhandle) {
} }
qKillQuery(*handle); qKillQuery(*handle);
qDestroyQueryInfo(*handle);
} }
void* qOpenQueryMgmt(int32_t vgId) { void* qOpenQueryMgmt(int32_t vgId) {
@ -6515,7 +6470,11 @@ void* qOpenQueryMgmt(int32_t vgId) {
return pQueryHandle; return pQueryHandle;
} }
void qSetQueryMgmtClosed(void* pQMgmt) { static void queryMgmtKillQueryFn(void* handle) {
qKillQuery(handle);
}
void qQueryMgmtNotifyClosed(void* pQMgmt) {
if (pQMgmt == NULL) { if (pQMgmt == NULL) {
return; return;
} }
@ -6527,7 +6486,7 @@ void qSetQueryMgmtClosed(void* pQMgmt) {
pQueryMgmt->closed = true; pQueryMgmt->closed = true;
pthread_mutex_unlock(&pQueryMgmt->lock); pthread_mutex_unlock(&pQueryMgmt->lock);
taosCacheRefresh(pQueryMgmt->qinfoPool, freeqinfoFn); taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn);
} }
void qCleanupQueryMgmt(void* pQMgmt) { void qCleanupQueryMgmt(void* pQMgmt) {


@ -509,9 +509,10 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) {
for (i = 1; isdigit(z[i]); i++) { for (i = 1; isdigit(z[i]); i++) {
} }
/* here is the 1a/2s/3m/9y */ /* here is the 1u/1a/2s/3m/9y */
if ((z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' || z[i] == 'y' || if ((z[i] == 'u' || z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' ||
z[i] == 'w' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' || z[i] == 'y' || z[i] == 'w' ||
z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' ||
z[i] == 'Y' || z[i] == 'W') && z[i] == 'Y' || z[i] == 'W') &&
(isIdChar[(uint8_t)z[i + 1]] == 0)) { (isIdChar[(uint8_t)z[i + 1]] == 0)) {
*tokenType = TK_VARIABLE; *tokenType = TK_VARIABLE;
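
With 'u'/'U' accepted above, a literal such as 10u now lexes as a duration token (microseconds) alongside 1a/2s/3m/9y. A small illustrative membership check over the accepted suffix set, not the lexer's actual code:

#include <stdio.h>
#include <string.h>

static int isDurationSuffix(char c) {
  /* u: microsecond, a: millisecond, s/m/h/d/w/n/y: second..year (upper case accepted too) */
  return strchr("uasmhdnwyUASMHDNWY", c) != NULL;
}

int main(void) {
  printf("%d %d %d\n", isDurationSuffix('u'), isDurationSuffix('y'), isDurationSuffix('x'));  /* 1 1 0 */
  return 0;
}
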


@ -79,7 +79,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
pTSBuf->numOfAlloc = header.numOfVnode; pTSBuf->numOfAlloc = header.numOfVnode;
STSVnodeBlockInfoEx* tmp = realloc(pTSBuf->pData, sizeof(STSVnodeBlockInfoEx) * pTSBuf->numOfAlloc); STSVnodeBlockInfoEx* tmp = realloc(pTSBuf->pData, sizeof(STSVnodeBlockInfoEx) * pTSBuf->numOfAlloc);
if (tmp == NULL) { if (tmp == NULL) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
@ -92,7 +92,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
pTSBuf->tsOrder = header.tsOrder; pTSBuf->tsOrder = header.tsOrder;
if (pTSBuf->tsOrder != TSDB_ORDER_ASC && pTSBuf->tsOrder != TSDB_ORDER_DESC) { if (pTSBuf->tsOrder != TSDB_ORDER_ASC && pTSBuf->tsOrder != TSDB_ORDER_DESC) {
// tscError("invalid order info in buf:%d", pTSBuf->tsOrder); // tscError("invalid order info in buf:%d", pTSBuf->tsOrder);
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
@ -100,7 +100,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize); STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize);
if (buf == NULL) { if (buf == NULL) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
@ -120,7 +120,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
struct stat fileStat; struct stat fileStat;
if (fstat(fileno(pTSBuf->f), &fileStat) != 0) { if (fstat(fileno(pTSBuf->f), &fileStat) != 0) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
@ -137,7 +137,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
return pTSBuf; return pTSBuf;
} }
void* tsBufDestory(STSBuf* pTSBuf) { void* tsBufDestroy(STSBuf* pTSBuf) {
if (pTSBuf == NULL) { if (pTSBuf == NULL) {
return NULL; return NULL;
} }
@ -920,13 +920,13 @@ static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) {
pTSBuf->numOfAlloc = INITIAL_VNODEINFO_SIZE; pTSBuf->numOfAlloc = INITIAL_VNODEINFO_SIZE;
pTSBuf->pData = calloc(pTSBuf->numOfAlloc, sizeof(STSVnodeBlockInfoEx)); pTSBuf->pData = calloc(pTSBuf->numOfAlloc, sizeof(STSVnodeBlockInfoEx));
if (pTSBuf->pData == NULL) { if (pTSBuf->pData == NULL) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
pTSBuf->tsData.rawBuf = malloc(MEM_BUF_SIZE); pTSBuf->tsData.rawBuf = malloc(MEM_BUF_SIZE);
if (pTSBuf->tsData.rawBuf == NULL) { if (pTSBuf->tsData.rawBuf == NULL) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
@ -936,13 +936,13 @@ static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) {
pTSBuf->assistBuf = malloc(MEM_BUF_SIZE); pTSBuf->assistBuf = malloc(MEM_BUF_SIZE);
if (pTSBuf->assistBuf == NULL) { if (pTSBuf->assistBuf == NULL) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }
pTSBuf->block.payload = malloc(MEM_BUF_SIZE); pTSBuf->block.payload = malloc(MEM_BUF_SIZE);
if (pTSBuf->block.payload == NULL) { if (pTSBuf->block.payload == NULL) {
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
return NULL; return NULL;
} }


@ -47,7 +47,7 @@ void simpleTest() {
EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->tsData.len, 0);
EXPECT_EQ(pTSBuf->block.numOfElem, num); EXPECT_EQ(pTSBuf->block.numOfElem, num);
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
} }
// one large list of ts, the ts list need to be split into several small blocks // one large list of ts, the ts list need to be split into several small blocks
@ -71,7 +71,7 @@ void largeTSTest() {
EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->tsData.len, 0);
EXPECT_EQ(pTSBuf->block.numOfElem, num); EXPECT_EQ(pTSBuf->block.numOfElem, num);
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
} }
void multiTagsTest() { void multiTagsTest() {
@ -101,7 +101,7 @@ void multiTagsTest() {
EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->tsData.len, 0);
EXPECT_EQ(pTSBuf->block.numOfElem, num); EXPECT_EQ(pTSBuf->block.numOfElem, num);
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
} }
void multiVnodeTagsTest() { void multiVnodeTagsTest() {
@ -139,7 +139,7 @@ void multiVnodeTagsTest() {
EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->tsData.len, 0);
EXPECT_EQ(pTSBuf->block.numOfElem, num); EXPECT_EQ(pTSBuf->block.numOfElem, num);
tsBufDestory(pTSBuf); tsBufDestroy(pTSBuf);
} }
void loadDataTest() { void loadDataTest() {
@ -386,8 +386,8 @@ void mergeDiffVnodeBufferTest() {
tsBufDisplay(pTSBuf1); tsBufDisplay(pTSBuf1);
tsBufDestory(pTSBuf2); tsBufDestroy(pTSBuf2);
tsBufDestory(pTSBuf1); tsBufDestroy(pTSBuf1);
} }
void mergeIdenticalVnodeBufferTest() { void mergeIdenticalVnodeBufferTest() {
@ -432,8 +432,8 @@ void mergeIdenticalVnodeBufferTest() {
printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag, elem.ts); printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag, elem.ts);
} }
tsBufDestory(pTSBuf1); tsBufDestroy(pTSBuf1);
tsBufDestory(pTSBuf2); tsBufDestroy(pTSBuf2);
} }
} // namespace } // namespace


@ -111,7 +111,8 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
if (pMemTable == NULL) return 0; if (pMemTable == NULL) return 0;
T_REF_INC(pMemTable); int ref = T_REF_INC(pMemTable);
tsdbDebug("vgId:%d ref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
return 0; return 0;
} }
@ -119,7 +120,9 @@ int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
if (pMemTable == NULL) return 0; if (pMemTable == NULL) return 0;
if (T_REF_DEC(pMemTable) == 0) { int ref = T_REF_DEC(pMemTable);
tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
if (ref == 0) {
STsdbCfg * pCfg = &pRepo->config; STsdbCfg * pCfg = &pRepo->config;
STsdbBufPool *pBufPool = pRepo->pPool; STsdbBufPool *pBufPool = pRepo->pPool;
@ -159,6 +162,7 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
tsdbRefMemTable(pRepo, *pIMem); tsdbRefMemTable(pRepo, *pIMem);
if (tsdbUnlockRepo(pRepo) < 0) return -1; if (tsdbUnlockRepo(pRepo) < 0) return -1;
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
return 0; return 0;
} }


@ -57,8 +57,30 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
STable * super = NULL; STable * super = NULL;
STable * table = NULL; STable * table = NULL;
int newSuper = 0; int newSuper = 0;
int tid = pCfg->tableId.tid;
STable * pTable = NULL;
STable *pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid); if (tid < 0 || tid >= pRepo->config.maxTables) {
tsdbError("vgId:%d failed to create table since invalid tid %d", REPO_ID(pRepo), tid);
terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
goto _err;
}
if (pMeta->tables[tid] != NULL) {
if (TABLE_UID(pMeta->tables[tid]) == pCfg->tableId.uid) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
return TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
} else {
tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
" exists, replace it with new table, this can be not reasonable",
REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
TABLE_UID(pMeta->tables[tid]));
tsdbDropTable(pRepo, pMeta->tables[tid]->tableId);
}
}
pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid);
if (pTable != NULL) { if (pTable != NULL) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable)); TABLE_TID(pTable), TABLE_UID(pTable));
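
The added checks above validate the tid slot before anything else: out-of-range ids are rejected, a same-uid occupant means the table already exists, and a different-uid occupant is dropped and replaced. A simplified sketch of that decision, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define MAX_TABLES 8

typedef struct { int tid; uint64_t uid; } Table;
static Table *slots[MAX_TABLES];

/* -1: invalid tid, 1: already exists (same uid), 0: registered (possibly replacing a stale entry) */
static int registerTable(Table *t) {
  if (t->tid < 0 || t->tid >= MAX_TABLES) return -1;
  Table *old = slots[t->tid];
  if (old != NULL) {
    if (old->uid == t->uid) return 1;
    slots[t->tid] = NULL;              /* stale entry with a different uid: drop it first */
  }
  slots[t->tid] = t;
  return 0;
}

int main(void) {
  Table a = {1, 100}, b = {1, 100}, c = {1, 200};
  printf("%d %d %d\n", registerTable(&a), registerTable(&b), registerTable(&c));  /* 0 1 0 */
  return 0;
}
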
@ -72,10 +94,10 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
super = tsdbNewTable(pCfg, true); super = tsdbNewTable(pCfg, true);
if (super == NULL) goto _err; if (super == NULL) goto _err;
} else { } else {
// TODO if (TABLE_TYPE(super) != TSDB_SUPER_TABLE || TABLE_UID(super) != pCfg->superUid) {
if (super->type != TSDB_SUPER_TABLE) return -1; terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
if (super->tableId.uid != pCfg->superUid) return -1; goto _err;
// tsdbUpdateTable(pRepo, super, pCfg); }
} }
} }
@ -705,6 +727,9 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
T_REF_INC(pTable); T_REF_INC(pTable);
tsdbDebug("table %s tid %d uid %" PRIu64 " is created", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable),
TABLE_UID(pTable));
return pTable; return pTable;
_err: _err:
@ -714,7 +739,9 @@ _err:
static void tsdbFreeTable(STable *pTable) { static void tsdbFreeTable(STable *pTable) {
if (pTable) { if (pTable) {
if (pTable->name != NULL) tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable)); if (pTable->name != NULL)
tsdbDebug("table %s tid %d uid %" PRIu64 " is destroyed", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable),
TABLE_UID(pTable));
tfree(TABLE_NAME(pTable)); tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
@ -782,7 +809,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
tsdbGetTableSchemaImpl(pTable, false, false, -1)); tsdbGetTableSchemaImpl(pTable, false, false, -1));
} }
tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable)); TABLE_TID(pTable), TABLE_UID(pTable));
return 0; return 0;


@ -877,7 +877,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
pCompBlock->keyFirst = dataColsKeyFirst(pDataCols); pCompBlock->keyFirst = dataColsKeyFirst(pDataCols);
pCompBlock->keyLast = dataColsKeyAt(pDataCols, rowsToWrite - 1); pCompBlock->keyLast = dataColsKeyAt(pDataCols, rowsToWrite - 1);
tsdbDebug("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64 tsdbTrace("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64
" numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64, " numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64,
REPO_ID(helperRepo(pHelper)), pHelper->tableInfo.tid, pFile->fname, (int64_t)(pCompBlock->offset), REPO_ID(helperRepo(pHelper)), pHelper->tableInfo.tid, pFile->fname, (int64_t)(pCompBlock->offset),
(int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst, (int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst,
@ -948,7 +948,7 @@ static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int
ASSERT(pHelper->pCompInfo->blocks[0].keyLast < pHelper->pCompInfo->blocks[1].keyFirst); ASSERT(pHelper->pCompInfo->blocks[0].keyLast < pHelper->pCompInfo->blocks[1].keyFirst);
} }
tsdbDebug("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid, tsdbTrace("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid,
blkIdx); blkIdx);
return 0; return 0;


@ -110,9 +110,10 @@ typedef struct STsdbQueryHandle {
SFileGroupIter fileIter; SFileGroupIter fileIter;
SRWHelper rhelper; SRWHelper rhelper;
STableBlockInfo* pDataBlockInfo; STableBlockInfo* pDataBlockInfo;
int32_t allocSize; // allocated data block size
SMemTable* mem; // mem-table SMemTable* mem; // mem-table
SMemTable* imem; // imem-table, acquired from snapshot SMemTable* imem; // imem-table, acquired from snapshot
SArray* defaultLoadColumn;// default load column
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */ SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */ SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */
} STsdbQueryHandle; } STsdbQueryHandle;
@ -136,6 +137,34 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
pCompBlockLoadInfo->fileId = -1; pCompBlockLoadInfo->fileId = -1;
} }
static SArray* getColumnIdList(STsdbQueryHandle* pQueryHandle) {
size_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
assert(numOfCols <= TSDB_MAX_COLUMNS);
SArray* pIdList = taosArrayInit(numOfCols, sizeof(int16_t));
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
taosArrayPush(pIdList, &pCol->info.colId);
}
return pIdList;
}
static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS) {
SArray* pLocalIdList = getColumnIdList(pQueryHandle);
// check if the primary timestamp column needs to be loaded
int16_t colId = *(int16_t*)taosArrayGet(pLocalIdList, 0);
// if the primary timestamp column is not included in the specified load column list, add it
if (loadTS && colId != 0) {
int16_t columnId = 0;
taosArrayInsert(pLocalIdList, 0, &columnId);
}
return pLocalIdList;
}
TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) { TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
pQueryHandle->order = pCond->order; pQueryHandle->order = pCond->order;
@ -148,6 +177,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
pQueryHandle->activeIndex = 0; // current active table index pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qinfo = qinfo; pQueryHandle->qinfo = qinfo;
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
pQueryHandle->allocSize = 0;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb); tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem); tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
@ -196,6 +226,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
} }
} }
pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
tsdbDebug("%p total numOfTable:%zu in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo)); tsdbDebug("%p total numOfTable:%zu in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo); tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
@ -546,33 +578,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
.tid = (_checkInfo)->tableId.tid, \ .tid = (_checkInfo)->tableId.tid, \
.uid = (_checkInfo)->tableId.uid}) .uid = (_checkInfo)->tableId.uid})
static SArray* getColumnIdList(STsdbQueryHandle* pQueryHandle) {
size_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
assert(numOfCols <= TSDB_MAX_COLUMNS);
SArray* pIdList = taosArrayInit(numOfCols, sizeof(int16_t));
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
taosArrayPush(pIdList, &pCol->info.colId);
}
return pIdList;
}
static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS) {
SArray* pLocalIdList = getColumnIdList(pQueryHandle);
// check if the primary time stamp column needs to load
int16_t colId = *(int16_t*)taosArrayGet(pLocalIdList, 0);
// the primary timestamp column does not be included in the the specified load column list, add it
if (loadTS && colId != 0) {
int16_t columnId = 0;
taosArrayInsert(pLocalIdList, 0, &columnId);
}
return pLocalIdList;
}
static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
STsdbRepo *pRepo = pQueryHandle->pTsdb; STsdbRepo *pRepo = pQueryHandle->pTsdb;
@ -584,8 +590,6 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
data->uid = pCheckInfo->pTableObj->tableId.uid; data->uid = pCheckInfo->pTableObj->tableId.uid;
bool blockLoaded = false; bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
int64_t st = taosGetTimestampUs(); int64_t st = taosGetTimestampUs();
if (pCheckInfo->pDataCols == NULL) { if (pCheckInfo->pDataCols == NULL) {
@ -613,7 +617,6 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows);
pBlock->numOfRows = pCols->numOfRows; pBlock->numOfRows = pCols->numOfRows;
taosArrayDestroy(sa);
tfree(data); tfree(data);
int64_t et = taosGetTimestampUs() - st; int64_t et = taosGetTimestampUs() - st;
@ -656,12 +659,8 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
return; return;
} }
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo); doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo);
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);
taosArrayDestroy(sa);
} else { } else {
/* /*
* no data in cache, only load data from file * no data in cache, only load data from file
@ -681,14 +680,12 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
} }
static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
SQueryFilePos* cur = &pQueryHandle->cur; SQueryFilePos* cur = &pQueryHandle->cur;
if (ASCENDING_TRAVERSE(pQueryHandle->order)) { if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
// query ended in current block // query ended in current block
if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) { if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) {
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) { if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
taosArrayDestroy(sa);
return false; return false;
} }
@ -702,7 +699,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
cur->pos = 0; cur->pos = 0;
} }
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);
} else { // the whole block is loaded in to buffer } else { // the whole block is loaded in to buffer
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
} }
@ -719,13 +716,12 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
cur->pos = pBlock->numOfRows - 1; cur->pos = pBlock->numOfRows - 1;
} }
doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);
} else { } else {
handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo);
} }
} }
taosArrayDestroy(sa);
return pQueryHandle->realNumOfRows > 0; return pQueryHandle->realNumOfRows > 0;
} }
@ -1250,13 +1246,19 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void*
} }
static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numOfBlocks, int32_t* numOfAllocBlocks) { static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numOfBlocks, int32_t* numOfAllocBlocks) {
char* tmp = realloc(pQueryHandle->pDataBlockInfo, sizeof(STableBlockInfo) * numOfBlocks); size_t size = sizeof(STableBlockInfo) * numOfBlocks;
if (pQueryHandle->allocSize < size) {
pQueryHandle->allocSize = size;
char* tmp = realloc(pQueryHandle->pDataBlockInfo, pQueryHandle->allocSize);
if (tmp == NULL) { if (tmp == NULL) {
return TSDB_CODE_TDB_OUT_OF_MEMORY; return TSDB_CODE_TDB_OUT_OF_MEMORY;
} }
pQueryHandle->pDataBlockInfo = (STableBlockInfo*) tmp; pQueryHandle->pDataBlockInfo = (STableBlockInfo*) tmp;
memset(pQueryHandle->pDataBlockInfo, 0, sizeof(STableBlockInfo) * numOfBlocks); }
memset(pQueryHandle->pDataBlockInfo, 0, size);
*numOfAllocBlocks = numOfBlocks; *numOfAllocBlocks = numOfBlocks;
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
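
The change above turns the per-call realloc into a grow-only buffer: the handle caches allocSize and only reallocates when a larger block-info array is needed, clearing just the region the call will use. A minimal sketch of the pattern under assumed names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void *buf; size_t allocSize; } GrowBuf;

static int ensureCapacity(GrowBuf *g, size_t size) {
  if (g->allocSize < size) {
    void *tmp = realloc(g->buf, size);
    if (tmp == NULL) return -1;        /* keep the previous buffer intact on failure */
    g->buf = tmp;
    g->allocSize = size;
  }
  memset(g->buf, 0, size);             /* zero only what this call will actually use */
  return 0;
}

int main(void) {
  GrowBuf g = {NULL, 0};
  ensureCapacity(&g, 64);              /* grows */
  ensureCapacity(&g, 32);              /* reuses the existing 64-byte allocation */
  printf("capacity:%zu\n", g.allocSize);  /* 64 */
  free(g.buf);
  return 0;
}
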
@ -1492,9 +1494,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
return false; return false;
} }
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo);
/*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, sa); /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, pQueryHandle->defaultLoadColumn);
if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) {
// data already retrieve, discard other data rows and return // data already retrieve, discard other data rows and return
@ -1508,7 +1509,6 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
pQueryHandle->window = pQueryHandle->cur.win; pQueryHandle->window = pQueryHandle->cur.win;
pQueryHandle->cur.rows = 1; pQueryHandle->cur.rows = 1;
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
taosArrayDestroy(sa);
return true; return true;
} else { } else {
STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
@ -1565,7 +1565,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
assert(ret); assert(ret);
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo);
/*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, sa); /*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn);
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
@ -2333,6 +2333,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
} }
taosArrayDestroy(pQueryHandle->pColumns); taosArrayDestroy(pQueryHandle->pColumns);
taosArrayDestroy(pQueryHandle->defaultLoadColumn);
tfree(pQueryHandle->pDataBlockInfo); tfree(pQueryHandle->pDataBlockInfo);
tfree(pQueryHandle->statis); tfree(pQueryHandle->statis);


@ -23,13 +23,12 @@ extern "C" {
#include "os.h" #include "os.h"
#define TARRAY_MIN_SIZE 8 #define TARRAY_MIN_SIZE 8
#define TARRAY_GET_ELEM(array, index) ((array)->pData + (index) * (array)->elemSize) #define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
typedef struct SArray { typedef struct SArray {
size_t size; size_t size;
size_t capacity; size_t capacity;
size_t elemSize; size_t elemSize;
void* pData; void* pData;
} SArray; } SArray;
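
The revised macro above casts pData to char* before applying the element offset, so the arithmetic is done in bytes instead of relying on non-standard void* arithmetic. An equivalent standalone illustration:

#include <stdio.h>
#include <stddef.h>

typedef struct { size_t elemSize; void *pData; } Arr;

static void *elemAt(const Arr *a, size_t index) {
  return (void *)((char *)a->pData + index * a->elemSize);  /* byte-wise offset via char* */
}

int main(void) {
  int data[3] = {10, 20, 30};
  Arr a = { sizeof(int), data };
  printf("%d\n", *(int *)elemAt(&a, 2));  /* 30 */
  return 0;
}
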


@ -33,15 +33,18 @@ typedef struct SCacheStatis {
int64_t refreshCount; int64_t refreshCount;
} SCacheStatis; } SCacheStatis;
struct STrashElem;
typedef struct SCacheDataNode { typedef struct SCacheDataNode {
uint64_t addedTime; // the added time when this element is added or updated into cache uint64_t addedTime; // the added time when this element is added or updated into cache
uint64_t lifespan; // expiredTime expiredTime when this element should be remove from cache uint64_t lifespan; // life duration after which this element should be removed from cache
uint64_t expireTime; // expire time
uint64_t signature; uint64_t signature;
uint32_t size; // allocated size for current SCacheDataNode struct STrashElem *pTNodeHeader; // point to trash node head
T_REF_DECLARE()
uint16_t keySize: 15; // max key size: 32kb uint16_t keySize: 15; // max key size: 32kb
bool inTrashCan: 1;// denote if it is in trash or not bool inTrashCan: 1;// denote if it is in trash or not
int32_t extendFactor; // number of life span extend uint32_t size; // allocated size for current SCacheDataNode
T_REF_DECLARE()
char *key; char *key;
char data[]; char data[];
} SCacheDataNode; } SCacheDataNode;


@ -18,6 +18,7 @@
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
void taosSetRandomFileFailFactor(int factor);
ssize_t taos_tread(int fd, void *buf, size_t count); ssize_t taos_tread(int fd, void *buf, size_t count);
ssize_t taos_twrite(int fd, void *buf, size_t count); ssize_t taos_twrite(int fd, void *buf, size_t count);
off_t taos_lseek(int fd, off_t offset, int whence); off_t taos_lseek(int fd, off_t offset, int whence);


@ -116,11 +116,13 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
return; return;
} }
int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
pCacheObj->totalSize -= pNode->size;
uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes",
pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size); pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize,
pNode->size);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode); free(pNode);
} }
@ -285,7 +287,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64
"bytes size:%" PRId64 "bytes", "bytes size:%" PRId64 "bytes",
pCacheObj->name, key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime), pCacheObj->name, key, pNode->data, pNode->addedTime, pNode->expireTime,
(int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize); (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize);
} else { } else {
uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key); uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key);
@ -312,16 +314,6 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
int32_t ref = 0; int32_t ref = 0;
if (ptNode != NULL) { if (ptNode != NULL) {
ref = T_REF_INC(*ptNode); ref = T_REF_INC(*ptNode);
// if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan
if (pCacheObj->extendLifespan) {
int64_t now = taosGetTimestampMs();
if ((now - (*ptNode)->addedTime) < (*ptNode)->lifespan * (*ptNode)->extendFactor) {
(*ptNode)->extendFactor += 1;
uDebug("key:%p extend life time to %"PRId64, key, (*ptNode)->lifespan * (*ptNode)->extendFactor + (*ptNode)->addedTime);
}
}
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
@ -347,8 +339,7 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t ke
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
if (ptNode != NULL) { if (ptNode != NULL) {
T_REF_INC(*ptNode); T_REF_INC(*ptNode);
(*ptNode)->extendFactor += 1; (*ptNode)->expireTime = taosGetTimestampMs() + (*ptNode)->lifespan;
// (*ptNode)->lifespan = expireTime;
} }
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
@ -380,17 +371,6 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
int32_t ref = T_REF_INC(ptNode); int32_t ref = T_REF_INC(ptNode);
uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref); uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref);
// if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan
if (pCacheObj->extendLifespan) {
int64_t now = taosGetTimestampMs();
if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) {
ptNode->extendFactor += 1;
uDebug("cache:%s, %p extend life time to %" PRId64, pCacheObj->name, ptNode->data,
ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime);
}
}
// the data is referenced by at least one object, so the reference count must be no less than 2. // the data is referenced by at least one object, so the reference count must be no less than 2.
assert(ref >= 2); assert(ref >= 2);
return data; return data;
@ -431,22 +411,58 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
} }
*data = NULL; *data = NULL;
int16_t ref = T_REF_DEC(pNode);
uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
if (_remove && (!pNode->inTrashCan)) { // note: extend lifespan before dec ref count
if (pCacheObj->extendLifespan) {
atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs());
uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime);
}
bool inTrashCan = pNode->inTrashCan;
uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1);
// NOTE: once refcount is decrease, pNode may be free by other thread immediately.
int32_t ref = T_REF_DEC(pNode);
if (inTrashCan) {
// Remove it if the ref count is 0.
// The ref count does not need to be loaded and checked again after the lock is acquired, since the ref count
// cannot be increased while the node is in the trashcan.
if (ref == 0) {
__cache_wr_lock(pCacheObj);
assert(pNode->pTNodeHeader->pData == pNode);
taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader);
__cache_unlock(pCacheObj);
}
} else {
assert(pNode->pTNodeHeader == NULL);
if (_remove) { // not in trash can, but need to remove it
__cache_wr_lock(pCacheObj); __cache_wr_lock(pCacheObj);
/*
* Remove the node directly if it is not referenced by any other user; otherwise move it to the trashcan
* and wait for all users to release the resource.
*
* NOTE: if the previous ref is 0 and the current ref is still 0, remove it. If the previous ref is not 0,
* another thread is trying to do the same thing.
*/
if (ref == 0) {
if (T_REF_VAL_GET(pNode) == 0) { if (T_REF_VAL_GET(pNode) == 0) {
// remove directly, if not referenced by other users
taosCacheReleaseNode(pCacheObj, pNode); taosCacheReleaseNode(pCacheObj, pNode);
} else { } else {
// pNode may be released immediately by another thread after its reference count is set to 0,
// so we need to lock it in the first place.
taosCacheMoveToTrash(pCacheObj, pNode); taosCacheMoveToTrash(pCacheObj, pNode);
} }
}
__cache_unlock(pCacheObj); __cache_unlock(pCacheObj);
// } else { // extend its life time
// if (pCacheObj->extendLifespan) {
// atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs());
// uDebug("cache:%s data:%p extend life time to %"PRId64 " after release", pCacheObj->name, pNode->data, pNode->expireTime);
// }
}
} }
} }
@ -486,7 +502,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size,
uint64_t duration) { uint64_t duration) {
size_t totalSize = size + sizeof(SCacheDataNode) + keyLen + 1; size_t totalSize = size + sizeof(SCacheDataNode) + keyLen;
SCacheDataNode *pNewNode = calloc(1, totalSize); SCacheDataNode *pNewNode = calloc(1, totalSize);
if (pNewNode == NULL) { if (pNewNode == NULL) {
@ -503,7 +519,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *
pNewNode->addedTime = (uint64_t)taosGetTimestampMs(); pNewNode->addedTime = (uint64_t)taosGetTimestampMs();
pNewNode->lifespan = duration; pNewNode->lifespan = duration;
pNewNode->extendFactor = 1; pNewNode->expireTime = pNewNode->addedTime + pNewNode->lifespan;
pNewNode->signature = (uint64_t)pNewNode; pNewNode->signature = (uint64_t)pNewNode;
pNewNode->size = (uint32_t)totalSize; pNewNode->size = (uint32_t)totalSize;
@ -512,6 +528,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *
void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
if (pNode->inTrashCan) { /* node is already in trash */ if (pNode->inTrashCan) { /* node is already in trash */
assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode);
return; return;
} }
@ -527,6 +544,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pCacheObj->pTrash = pElem; pCacheObj->pTrash = pElem;
pNode->inTrashCan = true; pNode->inTrashCan = true;
pNode->pTNodeHeader = pElem;
pCacheObj->numOfElemsInTrash++; pCacheObj->numOfElemsInTrash++;
uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash); uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash);
@ -629,7 +647,7 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t
__cache_wr_lock(pCacheObj); __cache_wr_lock(pCacheObj);
while (taosHashIterNext(pIter)) { while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= time && T_REF_VAL_GET(pNode) <= 0) { if (pNode->expireTime < time && T_REF_VAL_GET(pNode) <= 0) {
taosCacheReleaseNode(pCacheObj, pNode); taosCacheReleaseNode(pCacheObj, pNode);
continue; continue;
} }
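
The cache hunks above replace the addedTime + lifespan * extendFactor computation with an absolute expireTime stamped at creation and refreshed on demand; the refresh pass then evicts any node whose expireTime has passed and whose ref count has dropped to zero. A small sketch of that model, not the actual SCacheObj code:

#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t lifespan;    /* ms */
  int64_t expireTime;  /* absolute ms */
  int     ref;
} Node;

static int64_t now_ms = 0;  /* stand-in clock */

static void touch(Node *n)          { n->expireTime = now_ms + n->lifespan; }
static int  canEvict(const Node *n) { return n->expireTime < now_ms && n->ref <= 0; }

int main(void) {
  Node n = { .lifespan = 100, .ref = 0 };
  touch(&n);                         /* expireTime = 100 */
  now_ms = 150;
  printf("%d\n", canEvict(&n));      /* 1: expired and unreferenced */
  touch(&n);                         /* expireTime refreshed to 250 */
  printf("%d\n", canEvict(&n));      /* 0 */
  return 0;
}
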


@ -26,40 +26,51 @@
#include "os.h" #include "os.h"
#define RANDOM_FILE_FAIL_FACTOR 5 #ifdef TAOS_RANDOM_FILE_FAIL
static int random_file_fail_factor = 20;
void taosSetRandomFileFailFactor(int factor)
{
random_file_fail_factor = factor;
}
#endif
ssize_t taos_tread(int fd, void *buf, size_t count) ssize_t taos_tread(int fd, void *buf, size_t count)
{ {
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { if (random_file_fail_factor > 0) {
if (rand() % random_file_fail_factor == 0) {
errno = EIO; errno = EIO;
return -1; return -1;
} }
}
#endif #endif
return tread(fd, buf, count); return tread(fd, buf, count);
} }
ssize_t taos_twrite(int fd, void *buf, size_t count) ssize_t taos_twrite(int fd, void *buf, size_t count)
{ {
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { if (random_file_fail_factor > 0) {
if (rand() % random_file_fail_factor == 0) {
errno = EIO; errno = EIO;
return -1; return -1;
} }
}
#endif #endif
return twrite(fd, buf, count); return twrite(fd, buf, count);
} }
off_t taos_lseek(int fd, off_t offset, int whence) off_t taos_lseek(int fd, off_t offset, int whence)
{ {
#ifdef TAOS_RANDOM_FILE_FAIL #ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { if (random_file_fail_factor > 0) {
if (rand() % random_file_fail_factor == 0) {
errno = EIO; errno = EIO;
return -1; return -1;
} }
}
#endif #endif
return lseek(fd, offset, whence); return lseek(fd, offset, whence);
} }
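
The rewrite above makes the fault-injection rate configurable: a factor of N fails roughly one in N calls with EIO, and a factor of 0 disables injection. A self-contained sketch of the same idea:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fail_factor = 20;   /* ~1 in 20 calls fail; 0 disables injection */

static int maybeFail(void) {
  if (fail_factor > 0 && rand() % fail_factor == 0) {
    errno = EIO;
    return -1;
  }
  return 0;
}

int main(void) {
  int failures = 0;
  for (int i = 0; i < 1000; ++i) {
    if (maybeFail() < 0) failures++;
  }
  printf("injected %d failures in 1000 calls\n", failures);  /* about 50 expected */
  return 0;
}
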


@ -123,7 +123,7 @@ void taosIdPoolMarkStatus(void *handle, int id) {
int taosUpdateIdPool(id_pool_t *handle, int maxId) { int taosUpdateIdPool(id_pool_t *handle, int maxId) {
id_pool_t *pIdPool = (id_pool_t*)handle; id_pool_t *pIdPool = (id_pool_t*)handle;
if (maxId <= pIdPool->maxId) { if (maxId <= pIdPool->maxId) {
return -1; return 0;
} }
bool *idList = calloc(maxId, sizeof(bool)); bool *idList = calloc(maxId, sizeof(bool));

View File

@ -260,7 +260,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
} }
taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo)); taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
uDebug("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname); uTrace("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname);
return 0; return 0;
} }

View File

@ -317,29 +317,34 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) { static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
*result = val; *result = val;
int64_t factor = 1000L;
switch (unit) { switch (unit) {
case 's': case 's':
(*result) *= MILLISECOND_PER_SECOND; (*result) *= MILLISECOND_PER_SECOND*factor;
break; break;
case 'm': case 'm':
(*result) *= MILLISECOND_PER_MINUTE; (*result) *= MILLISECOND_PER_MINUTE*factor;
break; break;
case 'h': case 'h':
(*result) *= MILLISECOND_PER_HOUR; (*result) *= MILLISECOND_PER_HOUR*factor;
break; break;
case 'd': case 'd':
(*result) *= MILLISECOND_PER_DAY; (*result) *= MILLISECOND_PER_DAY*factor;
break; break;
case 'w': case 'w':
(*result) *= MILLISECOND_PER_WEEK; (*result) *= MILLISECOND_PER_WEEK*factor;
break; break;
case 'n': case 'n':
(*result) *= MILLISECOND_PER_MONTH; (*result) *= MILLISECOND_PER_MONTH*factor;
break; break;
case 'y': case 'y':
(*result) *= MILLISECOND_PER_YEAR; (*result) *= MILLISECOND_PER_YEAR*factor;
break; break;
case 'a': case 'a':
(*result) *= factor;
break;
case 'u':
break; break;
default: { default: {
; ;
@ -348,7 +353,6 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
} }
/* get the value in microsecond */ /* get the value in microsecond */
(*result) *= 1000L;
return 0; return 0;
} }
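With the per-unit factor folded in, every suffix converts directly to microseconds, 'a' maps to milliseconds and 'u' passes through unchanged, so the old trailing multiplication by 1000 is gone. A rough standalone equivalent (constants written out, names illustrative):

#include <stdint.h>

#define USEC_PER_MSEC 1000LL

/* convert a value with a unit suffix straight to microseconds */
static int to_microseconds(int64_t val, char unit, int64_t *result) {
  switch (unit) {
    case 's': *result = val * 1000LL          * USEC_PER_MSEC; break;
    case 'm': *result = val * 60 * 1000LL     * USEC_PER_MSEC; break;
    case 'h': *result = val * 3600 * 1000LL   * USEC_PER_MSEC; break;
    case 'd': *result = val * 86400 * 1000LL  * USEC_PER_MSEC; break;
    case 'a': *result = val * USEC_PER_MSEC; break;  /* milliseconds */
    case 'u': *result = val; break;                  /* already microseconds */
    default:  return -1;
  }
  return 0;
}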

View File

@ -176,16 +176,28 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
pVnode->status = TAOS_VN_STATUS_UPDATING; pVnode->status = TAOS_VN_STATUS_UPDATING;
int32_t code = vnodeSaveCfg(pVnodeCfg); int32_t code = vnodeSaveCfg(pVnodeCfg);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
code = vnodeReadCfg(pVnode); code = vnodeReadCfg(pVnode);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
code = syncReconfig(pVnode->sync, &pVnode->syncCfg); code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg); code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg);
if (code != TSDB_CODE_SUCCESS) return code; if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
return code;
}
pVnode->status = TAOS_VN_STATUS_READY; pVnode->status = TAOS_VN_STATUS_READY;
vDebug("vgId:%d, vnode is altered", pVnode->vgId); vDebug("vgId:%d, vnode is altered", pVnode->vgId);
@ -508,7 +520,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount); vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount);
// release local resources only after cutting off outside connections // release local resources only after cutting off outside connections
qSetQueryMgmtClosed(pVnode->qMgmt); qQueryMgmtNotifyClosed(pVnode->qMgmt);
vnodeRelease(pVnode); vnodeRelease(pVnode);
} }
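Each early return in vnodeAlter now puts the vnode back into the ready state before bailing out, so a failed reconfiguration can no longer leave it stuck in updating. One way to express the same guarantee with a single exit label is sketched below; the step functions and types are placeholders, not the real vnode API:

typedef enum { VN_STATUS_READY, VN_STATUS_UPDATING } VnStatus;
typedef struct { VnStatus status; } Vnode;

/* hypothetical steps standing in for vnodeSaveCfg/vnodeReadCfg/syncReconfig/tsdbConfigRepo */
static int stepSaveCfg(Vnode *v)  { (void)v; return 0; }
static int stepReadCfg(Vnode *v)  { (void)v; return 0; }
static int stepSync(Vnode *v)     { (void)v; return 0; }
static int stepStorage(Vnode *v)  { (void)v; return 0; }

int vnodeAlterSketch(Vnode *v) {
  int code = 0;
  v->status = VN_STATUS_UPDATING;

  if ((code = stepSaveCfg(v)) != 0) goto _exit;
  if ((code = stepReadCfg(v)) != 0) goto _exit;
  if ((code = stepSync(v))    != 0) goto _exit;
  if ((code = stepStorage(v)) != 0) goto _exit;

_exit:
  v->status = VN_STATUS_READY;  /* restored on every exit path, success or failure */
  return code;
}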

View File

@ -82,6 +82,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
} else { } else {
assert(*qhandle == (void*) killQueryMsg->qhandle); assert(*qhandle == (void*) killQueryMsg->qhandle);
qKillQuery(*qhandle);
qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true); qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true);
} }
@ -93,7 +94,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (contLen != 0) { if (contLen != 0) {
qinfo_t pQInfo = NULL; qinfo_t pQInfo = NULL;
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo); code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, &pQInfo);
SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp)); SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp));
pRsp->code = code; pRsp->code = code;
@ -108,9 +109,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo);
if (handle == NULL) { // failed to register qhandle if (handle == NULL) { // failed to register qhandle
pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
qDestroyQueryInfo(pQInfo); // destroy it directly
qKillQuery(pQInfo);
qKillQuery(pQInfo);
} else { } else {
assert(*handle == pQInfo); assert(*handle == pQInfo);
pRsp->qhandle = htobe64((uint64_t) pQInfo); pRsp->qhandle = htobe64((uint64_t) pQInfo);
@ -120,10 +119,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle);
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
// NOTE: there two refcount, needs to kill twice
// query has not been put into qhandle pool, kill it directly.
qKillQuery(*handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
return pRsp->code; return pRsp->code;
} }
@ -134,6 +129,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
dnodePutItemIntoReadQueue(pVnode, *handle); dnodePutItemIntoReadQueue(pVnode, *handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
} }
vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo); vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
} else { } else {
assert(pCont != NULL); assert(pCont != NULL);
@ -183,6 +179,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (pRetrieve->free == 1) { if (pRetrieve->free == 1) {
vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle);
qKillQuery(*handle);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
@ -209,6 +206,9 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
dnodePutItemIntoReadQueue(pVnode, *handle); dnodePutItemIntoReadQueue(pVnode, *handle);
pRet->qhandle = *handle; pRet->qhandle = *handle;
freeHandle = false; freeHandle = false;
} else {
qKillQuery(*handle);
freeHandle = true;
} }
} }
} }
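The query message handlers now kill a running query exactly once and then release its handle, instead of the earlier double kill on the registration-failure path. A stripped-down sketch of that kill-then-release ordering, with hypothetical stand-ins for qKillQuery and qReleaseQInfo:

#include <stdbool.h>
#include <stddef.h>

typedef struct { bool killed; int ref; } Query;

/* placeholders for qKillQuery / qReleaseQInfo */
static void killQuery(Query *q)                   { q->killed = true; }
static void releaseHandle(Query **h, bool freeIt) { if (freeIt) *h = NULL; else --(*h)->ref; }

/* failure while registering the handle: stop the query once, nothing to release yet */
static void onRegisterFailed(Query *q) {
  killQuery(q);
}

/* an explicit free request (retrieve with free == 1): kill first, then drop the handle */
static void onFreeRequest(Query **handle) {
  killQuery(*handle);
  releaseHandle(handle, true);
}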

File diff suppressed because it is too large

View File

@ -31,11 +31,22 @@ then
exit -1 exit -1
fi fi
CURR_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
TAOS_DIR=$CURR_DIR/../../..
else
TAOS_DIR=$CURR_DIR/../..
fi
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
# Then let us set up the library path so that our compiled SO file can be loaded by Python # Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
# Now we are all set, and let's see if we can find a crash. Note we pass all params # Now we are all set, and let's see if we can find a crash. Note we pass all params
python3 ./crash_gen.py $@ python3 ./crash_gen.py $@

View File

@ -156,3 +156,7 @@ python3 ./test.py -f alter/alter_table_crash.py
# client # client
python3 ./test.py -f client/client.py python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py

View File

@ -150,3 +150,7 @@ python3 ./test.py -f alter/alter_table_crash.py
# client # client
python3 ./test.py -f client/client.py python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py

View File

@ -37,17 +37,8 @@ class TDTestCase:
except Exception as e: except Exception as e:
tdLog.exit(e) tdLog.exit(e)
try: tdSql.error("select * from db.st")
tdSql.execute("select * from db.st") tdSql.error("select * from db.tb")
except Exception as e:
if e.args[0] != 'mnode invalid table name':
tdLog.exit(e)
try:
tdSql.execute("select * from db.tb")
except Exception as e:
if e.args[0] != 'mnode invalid table name':
tdLog.exit(e)
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -0,0 +1,500 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import subprocess
import time
from util.log import *
class TDSimClient:
def __init__(self):
self.testCluster = False
self.cfgDict = {
"numOfLogLines": "100000000",
"numOfThreadsPerCore": "2.0",
"locale": "en_US.UTF-8",
"charset": "UTF-8",
"asyncLog": "0",
"anyIp": "0",
"sdbDebugFlag": "135",
"rpcDebugFlag": "135",
"tmrDebugFlag": "131",
"cDebugFlag": "135",
"udebugFlag": "135",
"jnidebugFlag": "135",
"qdebugFlag": "135",
}
def init(self, path):
self.__init__()
self.path = path
def getLogDir(self):
self.logDir = "%s/sim/psim/log" % (self.path)
return self.logDir
def getCfgDir(self):
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
return self.cfgDir
def setTestCluster(self, value):
self.testCluster = value
def addExtraCfg(self, option, value):
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def deploy(self):
self.logDir = "%s/sim/psim/log" % (self.path)
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
tdLog.exit(cmd)
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
self.cfg("logDir", self.logDir)
for key, value in self.cfgDict.items():
self.cfg(key, value)
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
class TDDnode:
def __init__(self, index):
self.index = index
self.running = 0
self.deployed = 0
self.testCluster = False
self.valgrind = 0
def init(self, path):
self.path = path
def setTestCluster(self, value):
self.testCluster = value
def setValgrind(self, value):
self.valgrind = value
def getDataSize(self):
totalSize = 0
if (self.deployed == 1):
for dirpath, dirnames, filenames in os.walk(self.dataDir):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
totalSize = totalSize + os.path.getsize(fp)
return totalSize
def deploy(self):
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
self.path, self.index)
cmd = "rm -rf " + self.dataDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.dataDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
tdLog.exit(cmd)
if self.testCluster:
self.startIP()
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
self.cfg("publicIp", "192.168.0.%d" % (self.index))
self.cfg("internalIp", "192.168.0.%d" % (self.index))
self.cfg("privateIp", "192.168.0.%d" % (self.index))
self.cfg("dataDir", self.dataDir)
self.cfg("logDir", self.logDir)
self.cfg("numOfLogLines", "100000000")
self.cfg("mnodeEqualVnodeNum", "0")
self.cfg("walLevel", "1")
self.cfg("statusInterval", "1")
self.cfg("numOfTotalVnodes", "64")
self.cfg("numOfMnodes", "3")
self.cfg("numOfThreadsPerCore", "2.0")
self.cfg("monitor", "0")
self.cfg("maxVnodeConnections", "30000")
self.cfg("maxMgmtConnections", "30000")
self.cfg("maxMeterConnections", "30000")
self.cfg("maxShellConns", "30000")
self.cfg("locale", "en_US.UTF-8")
self.cfg("charset", "UTF-8")
self.cfg("asyncLog", "0")
self.cfg("anyIp", "0")
self.cfg("dDebugFlag", "135")
self.cfg("mDebugFlag", "135")
self.cfg("sdbDebugFlag", "135")
self.cfg("rpcDebugFlag", "135")
self.cfg("tmrDebugFlag", "131")
self.cfg("cDebugFlag", "135")
self.cfg("httpDebugFlag", "135")
self.cfg("monitorDebugFlag", "135")
self.cfg("udebugFlag", "135")
self.cfg("jnidebugFlag", "135")
self.cfg("qdebugFlag", "135")
self.deployed = 1
tdLog.debug(
"dnode:%d is deployed and configured by %s" %
(self.index, self.cfgPath))
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
buildPath = ""
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def start(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/taosd"
if self.deployed == 0:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
cmd = "nohup %s %s -c %s --random-file-fail-factor 5 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)
print(cmd)
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
time.sleep(5)
def stop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
def startIP(self):
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def stopIP(self):
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
self.index, self.index)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def getDnodeRootDir(self, index):
dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
return dnodeRootDir
def getDnodesRootDir(self):
dnodesRootDir = "%s/sim/psim" % (self.path)
return dnodesRootDir
class TDDnodes:
def __init__(self):
self.dnodes = []
self.dnodes.append(TDDnode(1))
self.dnodes.append(TDDnode(2))
self.dnodes.append(TDDnode(3))
self.dnodes.append(TDDnode(4))
self.dnodes.append(TDDnode(5))
self.dnodes.append(TDDnode(6))
self.dnodes.append(TDDnode(7))
self.dnodes.append(TDDnode(8))
self.dnodes.append(TDDnode(9))
self.dnodes.append(TDDnode(10))
self.simDeployed = False
def init(self, path):
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
binPath = os.path.dirname(os.path.realpath(__file__))
binPath = binPath + "/../../../debug/"
tdLog.debug("binPath %s" % (binPath))
binPath = os.path.realpath(binPath)
tdLog.debug("binPath real path %s" % (binPath))
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
# tdLog.debug(cmd)
# os.system(cmd)
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
# tdLog.debug("execute %s" % (cmd))
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
# tdLog.debug("execute %s" % (cmd))
if path == "":
# self.path = os.path.expanduser('~')
self.path = os.path.abspath(binPath + "../../")
else:
self.path = os.path.realpath(path)
for i in range(len(self.dnodes)):
self.dnodes[i].init(self.path)
self.sim = TDSimClient()
self.sim.init(self.path)
def setTestCluster(self, value):
self.testCluster = value
def setValgrind(self, value):
self.valgrind = value
def deploy(self, index):
self.sim.setTestCluster(self.testCluster)
if (self.simDeployed == False):
self.sim.deploy()
self.simDeployed = True
self.check(index)
self.dnodes[index - 1].setTestCluster(self.testCluster)
self.dnodes[index - 1].setValgrind(self.valgrind)
self.dnodes[index - 1].deploy()
def cfg(self, index, option, value):
self.check(index)
self.dnodes[index - 1].cfg(option, value)
def start(self, index):
self.check(index)
self.dnodes[index - 1].start()
def stop(self, index):
self.check(index)
self.dnodes[index - 1].stop()
def getDataSize(self, index):
self.check(index)
return self.dnodes[index - 1].getDataSize()
def forcestop(self, index):
self.check(index)
self.dnodes[index - 1].forcestop()
def startIP(self, index):
self.check(index)
if self.testCluster:
self.dnodes[index - 1].startIP()
def stopIP(self, index):
self.check(index)
if self.dnodes[index - 1].testCluster:
self.dnodes[index - 1].stopIP()
def check(self, index):
if index < 1 or index > 10:
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
def stopAll(self):
tdLog.info("stop all dnodes")
for i in range(len(self.dnodes)):
self.dnodes[i].stop()
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
if processID:
cmd = "sudo systemctl stop taosd"
os.system(cmd)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
def getDnodesRootDir(self):
dnodesRootDir = "%s/sim" % (self.path)
return dnodesRootDir
def getSimCfgPath(self):
return self.sim.getCfgDir()
def getSimLogPath(self):
return self.sim.getLogDir()
def addSimExtraCfg(self, option, value):
self.sim.addExtraCfg(option, value)
tdDnodes = TDDnodes()

View File

@ -28,8 +28,9 @@ class TDSimClient:
"locale": "en_US.UTF-8", "locale": "en_US.UTF-8",
"charset": "UTF-8", "charset": "UTF-8",
"asyncLog": "0", "asyncLog": "0",
"anyIp": "0", "maxTablesPerVnode": "4",
"sdbDebugFlag": "135", "maxVgroupsPerDb": "1000",
"sdbDebugFlag": "143",
"rpcDebugFlag": "135", "rpcDebugFlag": "135",
"tmrDebugFlag": "131", "tmrDebugFlag": "131",
"cDebugFlag": "135", "cDebugFlag": "135",
@ -37,7 +38,6 @@ class TDSimClient:
"jnidebugFlag": "135", "jnidebugFlag": "135",
"qdebugFlag": "135", "qdebugFlag": "135",
} }
def init(self, path): def init(self, path):
self.__init__() self.__init__()
self.path = path self.path = path

View File

@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -10,7 +10,7 @@ sleep 3000
sql connect sql connect
print ============================ dnode1 start print ============================ dnode1 start
sql create database db maxTables 500 cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1 sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql show databases sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data00 != db then if $data00 != db then
@ -31,13 +31,10 @@ endi
if $data06 != 20,20,20 then if $data06 != 20,20,20 then
return -1 return -1
endi endi
if $data07 != 500 then if $data07 != 2 then
return -1 return -1
endi endi
if $data08 != 2 then if $data08 != 4 then
return -1
endi
if $data09 != 4 then
return -1 return -1
endi endi
@ -46,7 +43,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
return return
sql_error alter database db cache 256 sql_error alter database db cache 256
sql_error alter database db blocks 1 sql_error alter database db blocks 1
sql_error alter database db maxTables 10
sql_error alter database db days 10 sql_error alter database db days 10
sql_error alter database db keep 10 sql_error alter database db keep 10
sql_error alter database db minRows 350 sql_error alter database db minRows 350
@ -59,7 +55,6 @@ sql_error alter database db replica 2
print ============== step3 print ============== step3
sql alter database db maxTables 1000
sql alter database db comp 1 sql alter database db comp 1
sql alter database db blocks 40 sql alter database db blocks 40
sql alter database db keep 30 sql alter database db keep 30

View File

@ -1,11 +1,13 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 5
system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode2 -c wallevel -v 2 system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 2 system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 5
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
@ -17,7 +19,7 @@ sleep 1000
print ============================ step1 print ============================ step1
sql create database db maxTables 5 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t000 using db.st tags(0) sql create table db.t000 using db.st tags(0)
sql create table db.t001 using db.st tags(1) sql create table db.t001 using db.st tags(1)
@ -74,9 +76,14 @@ if $rows != 20 then
endi endi
print ============================ step3 print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 10 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 10
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t100 using db.st tags(0) sql create table db.t100 using db.st tags(0)
sql create table db.t101 using db.st tags(1) sql create table db.t101 using db.st tags(1)
@ -133,9 +140,14 @@ if $rows != 40 then
endi endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 15 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 15
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 15
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t200 using db.st tags(0) sql create table db.t200 using db.st tags(0)
sql create table db.t201 using db.st tags(1) sql create table db.t201 using db.st tags(1)
@ -252,9 +264,14 @@ if $rows != 60 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 20 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t300 using db.st tags(0) sql create table db.t300 using db.st tags(0)
sql create table db.t301 using db.st tags(1) sql create table db.t301 using db.st tags(1)
@ -380,9 +397,14 @@ if $rows != 80 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 25 system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 25
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 25
sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
sql create table db.t400 using db.st tags(0) sql create table db.t400 using db.st tags(0)
sql create table db.t401 using db.st tags(1) sql create table db.t401 using db.st tags(1)

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -11,7 +11,7 @@ sql connect
print ============================ step1 print ============================ step1
sql create database db maxTables 10 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t0 using db.st tags(0) sql create table db.t0 using db.st tags(0)
sql create table db.t1 using db.st tags(1) sql create table db.t1 using db.st tags(1)
@ -49,8 +49,11 @@ endi
print ============================ step3 print ============================ step3
sql alter database db maxTables 20 system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t10 using db.st tags(0) sql create table db.t10 using db.st tags(0)
sql create table db.t11 using db.st tags(1) sql create table db.t11 using db.st tags(1)
@ -86,9 +89,11 @@ if $rows != 20 then
endi endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 30 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 30
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t20 using db.st tags(0) sql create table db.t20 using db.st tags(0)
sql create table db.t21 using db.st tags(1) sql create table db.t21 using db.st tags(1)
@ -183,9 +188,11 @@ if $rows != 30 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 40 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 40
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t30 using db.st tags(0) sql create table db.t30 using db.st tags(0)
sql create table db.t31 using db.st tags(1) sql create table db.t31 using db.st tags(1)
@ -285,9 +292,11 @@ if $rows != 40 then
endi endi
print ============================ step12 print ============================ step12
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 50 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 50
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t40 using db.st tags(0) sql create table db.t40 using db.st tags(0)
sql create table db.t41 using db.st tags(1) sql create table db.t41 using db.st tags(1)

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 5
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -11,7 +11,7 @@ sql connect
print ============================ step1 print ============================ step1
sql create database db maxTables 5 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t000 using db.st tags(0) sql create table db.t000 using db.st tags(0)
sql create table db.t001 using db.st tags(1) sql create table db.t001 using db.st tags(1)
@ -68,9 +68,11 @@ if $rows != 20 then
endi endi
print ============================ step3 print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 10 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t100 using db.st tags(0) sql create table db.t100 using db.st tags(0)
sql create table db.t101 using db.st tags(1) sql create table db.t101 using db.st tags(1)
@ -127,9 +129,11 @@ if $rows != 40 then
endi endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 15 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 15
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t200 using db.st tags(0) sql create table db.t200 using db.st tags(0)
sql create table db.t201 using db.st tags(1) sql create table db.t201 using db.st tags(1)
@ -244,9 +248,11 @@ if $rows != 60 then
endi endi
print ============================ step9 print ============================ step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 20 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t300 using db.st tags(0) sql create table db.t300 using db.st tags(0)
sql create table db.t301 using db.st tags(1) sql create table db.t301 using db.st tags(1)
@ -370,10 +376,11 @@ if $rows != 80 then
endi endi
print ============================ step12 print ============================ step12
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sql alter database db maxTables 25 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 25
sleep 1000 sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
sql create table db.t400 using db.st tags(0) sql create table db.t400 using db.st tags(0)
sql create table db.t401 using db.st tags(1) sql create table db.t401 using db.st tags(1)
sql create table db.t402 using db.st tags(2) sql create table db.t402 using db.st tags(2)

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -11,7 +11,7 @@ sql connect
print ============================ step1 print ============================ step1
sql create database db maxTables 20 sql create database db
sql create table db.st (ts timestamp, i int) tags(t int) sql create table db.st (ts timestamp, i int) tags(t int)
sql create table db.t000 using db.st tags(0) sql create table db.t000 using db.st tags(0)
sql create table db.t001 using db.st tags(1) sql create table db.t001 using db.st tags(1)
@ -69,7 +69,7 @@ endi
print ============================ step3 print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 2
sleep 5000 sleep 5000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 5000 sleep 5000
@ -131,7 +131,7 @@ endi
print ============================ step5 print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 3 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
sleep 5000 sleep 5000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 5000 sleep 5000

View File

@ -1,8 +1,8 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -17,7 +17,7 @@ $db = $dbPrefix . $i
$tb = $tbPrefix . $i $tb = $tbPrefix . $i
print =============== step1 print =============== step1
sql create database $db replica 1 days 20 keep 2000 sql create database $db replica 1 days 20 keep 2000 cache 16
sql show databases sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data00 != $db then if $data00 != $db then
@ -35,7 +35,7 @@ endi
if $data05 != 20 then if $data05 != 20 then
return -1 return -1
endi endi
if $data07 != 1000 then if $data07 != 16 then
return -1 return -1
endi endi
@ -76,13 +76,6 @@ if $data05 != 15 then
return -1 return -1
endi endi
#if $data06 != 1500,15000,1500 then
# return -1
#endi
if $data07 != 1000 then
return -1
endi
print =============== step6 print =============== step6
sql use $db sql use $db
sql create table $tb (ts timestamp, speed int) sql create table $tb (ts timestamp, speed int)

View File

@ -2,6 +2,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 10
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
print ========= start dnodes print ========= start dnodes
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
@ -9,7 +11,7 @@ sleep 3000
sql connect sql connect
print ======== step1 print ======== step1
sql create database db blocks 2 maxtables 1000 sql create database db blocks 2
sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int) sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int)
$tbPrefix = db.t $tbPrefix = db.t
@ -21,7 +23,7 @@ while $i < 2000
endw endw
sql show db.vgroups sql show db.vgroups
if $rows != 2 then if $rows != 10 then
return -1 return -1
endi endi

View File

@ -2,15 +2,15 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000 system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
print =============== step2 print =============== step2
sql create database db maxtables 4 sql create database db
sql show databases sql show databases
print $rows $data07 print $rows $data07
@ -18,10 +18,6 @@ if $rows != 1 then
return -1 return -1
endi endi
if $data07 != 4 then
return -1
endi
print =============== step3 print =============== step3
sql use db sql use db
sql create table t1 (ts timestamp, i int) sql create table t1 (ts timestamp, i int)
@ -78,7 +74,7 @@ sql reset query cache
sleep 4000 sleep 4000
print =============== step7 print =============== step7
sql create database db maxtables 4 sql create database db
sql show databases sql show databases
print $rows $data07 print $rows $data07
@ -86,10 +82,6 @@ if $rows != 1 then
return -1 return -1
endi endi
if $data07 != 4 then
return -1
endi
print =============== step8 print =============== step8
sql use db sql use db
sql create table t1 (ts timestamp, i int) sql create table t1 (ts timestamp, i int)

View File

@ -6,7 +6,8 @@ $totalRows = $totalVnodes * $maxTables
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v $maxTables
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v $totalVnodes
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes
system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000 system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000
system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000 system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000
@ -17,7 +18,7 @@ print ========== prepare data
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect
sql create database db blocks 2 cache 1 maxTables $maxTables sql create database db blocks 2 cache 1
sql use db sql use db
print ========== step1 print ========== step1

View File

@ -81,7 +81,7 @@ print =============== step2 - no db
#11 #11
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/sql system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/sql
print 11-> $system_content print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","maxtables","cache(MB)","blocks","minrows","maxrows","ctime(Sec.)","wallevel","comp","precision","status"],"data":[],"rows":0}@ then if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","comp","precision","status"],"data":[],"rows":0}@ then
return -1 return -1
endi endi

View File

@ -128,12 +128,12 @@ endi
if $data06 != 365,365,365 then if $data06 != 365,365,365 then
return -1 return -1
endi endi
print data08 = $data08 print data07 = $data07
if $data08 != $cache then if $data07 != $cache then
print expect $cache, actual:$data08 print expect $cache, actual:$data07
return -1 return -1
endi endi
if $data09 != 4 then if $data08 != 4 then
return -1 return -1
endi endi

View File

@ -652,25 +652,25 @@ endi
if $data01 != 1 then if $data01 != 1 then
return -1 return -1
endi endi
if $data11 != null then if $data11 != NULL then
return -1 return -1
endi endi
if $data21 != 1 then if $data21 != 1 then
return -1 return -1
endi endi
if $data31 != null then if $data31 != NULL then
return -1 return -1
endi endi
if $data41 != 1 then if $data41 != 1 then
return -1 return -1
endi endi
if $data51 != null then if $data51 != NULL then
return -1 return -1
endi endi
if $data61 != 1 then if $data61 != 1 then
return -1 return -1
endi endi
if $data71 != null then if $data71 != NULL then
return -1 return -1
endi endi
if $data81 != 1 then if $data81 != 1 then
@ -689,25 +689,25 @@ endi
if $data01 != 0.000000000 then if $data01 != 0.000000000 then
return -1 return -1
endi endi
if $data11 != null then if $data11 != NULL then
return -1 return -1
endi endi
if $data21 != 1.000000000 then if $data21 != 1.000000000 then
return -1 return -1
endi endi
if $data31 != null then if $data31 != NULL then
return -1 return -1
endi endi
if $data41 != 2.000000000 then if $data41 != 2.000000000 then
return -1 return -1
endi endi
if $data51 != null then if $data51 != NULL then
return -1 return -1
endi endi
if $data61 != 3.000000000 then if $data61 != 3.000000000 then
return -1 return -1
endi endi
if $data71 != null then if $data71 != NULL then
return -1 return -1
endi endi
if $data81 != 4.000000000 then if $data81 != 4.000000000 then
@ -722,25 +722,25 @@ endi
if $data01 != 0 then if $data01 != 0 then
return -1 return -1
endi endi
if $data11 != null then if $data11 != NULL then
return -1 return -1
endi endi
if $data21 != 1 then if $data21 != 1 then
return -1 return -1
endi endi
if $data31 != null then if $data31 != NULL then
return -1 return -1
endi endi
if $data41 != 2 then if $data41 != 2 then
return -1 return -1
endi endi
if $data51 != null then if $data51 != NULL then
return -1 return -1
endi endi
if $data61 != 3 then if $data61 != 3 then
return -1 return -1
endi endi
if $data71 != null then if $data71 != NULL then
return -1 return -1
endi endi
if $data81 != 4 then if $data81 != 4 then
@ -755,25 +755,25 @@ endi
if $data01 != 0 then if $data01 != 0 then
return -1 return -1
endi endi
if $data11 != null then if $data11 != NULL then
return -1 return -1
endi endi
if $data21 != 1 then if $data21 != 1 then
return -1 return -1
endi endi
if $data31 != null then if $data31 != NULL then
return -1 return -1
endi endi
if $data41 != 2 then if $data41 != 2 then
return -1 return -1
endi endi
if $data51 != null then if $data51 != NULL then
return -1 return -1
endi endi
if $data61 != 3 then if $data61 != 3 then
return -1 return -1
endi endi
if $data71 != null then if $data71 != NULL then
return -1 return -1
endi endi
if $data81 != 4 then if $data81 != 4 then
@ -788,25 +788,25 @@ endi
if $data01 != 0 then if $data01 != 0 then
return -1 return -1
endi endi
if $data11 != null then if $data11 != NULL then
return -1 return -1
endi endi
if $data21 != 1 then if $data21 != 1 then
return -1 return -1
endi endi
if $data31 != null then if $data31 != NULL then
return -1 return -1
endi endi
if $data41 != 2 then if $data41 != 2 then
return -1 return -1
endi endi
if $data51 != null then if $data51 != NULL then
return -1 return -1
endi endi
if $data61 != 3 then if $data61 != 3 then
return -1 return -1
endi endi
if $data71 != null then if $data71 != NULL then
return -1 return -1
endi endi
if $data81 != 4 then if $data81 != 4 then
@ -821,25 +821,25 @@ endi
if $data01 != 0 then if $data01 != 0 then
return -1 return -1
endi endi
if $data11 != null then if $data11 != NULL then
return -1 return -1
endi endi
if $data21 != 1 then if $data21 != 1 then
return -1 return -1
endi endi
if $data31 != null then if $data31 != NULL then
return -1 return -1
endi endi
if $data41 != 2 then if $data41 != 2 then
return -1 return -1
endi endi
if $data51 != null then if $data51 != NULL then
return -1 return -1
endi endi
if $data61 != 3 then if $data61 != 3 then
return -1 return -1
endi endi
if $data71 != null then if $data71 != NULL then
return -1 return -1
endi endi
if $data81 != 4 then if $data81 != 4 then

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -63,6 +63,11 @@ if $data41 != 9 then
endi endi
sql select * from $stb order by ts asc limit 5 sql select * from $stb order by ts asc limit 5
print select * from $stb order by ts asc limit 5
print $data00 $data01
print $data10 $data11
print $data20 $data21
if $rows != 5 then if $rows != 5 then
return -1 return -1
endi endi

View File

@ -356,6 +356,11 @@ if $rows != 0 then
return -1 return -1
endi endi
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01
print $data10 $data11
print $data20 $data21
if $rows != 3 then if $rows != 3 then
return -1 return -1
endi endi

View File

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
sql connect sql connect

View File

@ -4,7 +4,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000

View File

@ -4,7 +4,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000

View File

@ -3,6 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 129 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 129
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 8
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 3000 sleep 3000
@ -36,7 +37,7 @@ while $x > $y
endw endw
sql show vgroups sql show vgroups
if $rows != 2 then if $rows != 8 then
return -1 return -1
endi endi
@ -51,7 +52,7 @@ while $x > $y
endw endw
sql show vgroups sql show vgroups
if $rows != 4 then if $rows != 8 then
return -1 return -1
endi endi
@ -66,7 +67,7 @@ while $x > $y
endw endw
sql show vgroups sql show vgroups
if $rows != 6 then if $rows != 8 then
return -1 return -1
endi endi

View File

@ -312,9 +312,9 @@ cd ../../../debug; make
#./test.sh -f general/parser/stream_on_sys.sim #./test.sh -f general/parser/stream_on_sys.sim
#./test.sh -f general/parser/repeatStream.sim #./test.sh -f general/parser/repeatStream.sim
./test.sh -f general/stream/metrics_del.sim #./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim ./test.sh -f general/stream/metrics_n.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim #./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim ./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_3.sim ./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim ./test.sh -f general/stream/stream_restart.sim
@ -324,7 +324,7 @@ cd ../../../debug; make
./test.sh -f general/stream/table_replica1_vnoden.sim ./test.sh -f general/stream/table_replica1_vnoden.sim
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim ./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim #./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim ./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim ./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim ./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
@ -358,6 +358,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim ./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim ./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim ./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim
./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim ./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim ./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim
./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim ./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim

View File

@ -138,6 +138,8 @@ echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG echo "statusInterval 1" >> $TAOS_CFG
echo "numOfTotalVnodes 4" >> $TAOS_CFG echo "numOfTotalVnodes 4" >> $TAOS_CFG
echo "maxVgroupsPerDb 4" >> $TAOS_CFG
echo "maxTablesPerVnode 1000" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG
echo "numOfMnodes 1" >> $TAOS_CFG echo "numOfMnodes 1" >> $TAOS_CFG
echo "locale en_US.UTF-8" >> $TAOS_CFG echo "locale en_US.UTF-8" >> $TAOS_CFG

View File

@ -0,0 +1,113 @@
#!/bin/bash
# if [ $# != 4 || $# != 5 ]; then
# echo "argument list need input : "
# echo " -n nodeName"
# echo " -s start/stop"
# echo " -c clear"
# exit 1
# fi
NODE_NAME=
EXEC_OPTON=
CLEAR_OPTION="false"
while getopts "n:s:u:x:ct" arg
do
case $arg in
n)
NODE_NAME=$OPTARG
;;
s)
EXEC_OPTON=$OPTARG
;;
c)
CLEAR_OPTION="clear"
;;
t)
SHELL_OPTION="true"
;;
u)
USERS=$OPTARG
;;
x)
SIGNAL=$OPTARG
;;
?)
echo "unkown argument"
;;
esac
done
SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
cd ../../..
else
cd ../../
fi
TAOS_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`
if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
else
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
fi
BUILD_DIR=$TAOS_DIR/$BIN_DIR/build
SIM_DIR=$TAOS_DIR/sim
NODE_DIR=$SIM_DIR/$NODE_NAME
EXE_DIR=$BUILD_DIR/bin
CFG_DIR=$NODE_DIR/cfg
LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb
TAOS_CFG=$NODE_DIR/cfg/taos.cfg
echo ------------ $EXEC_OPTON $NODE_NAME
TAOS_FLAG=$SIM_DIR/tsim/flag
if [ -f "$TAOS_FLAG" ]; then
EXE_DIR=/usr/local/bin/taos
fi
if [ "$CLEAR_OPTION" = "clear" ]; then
echo rm -rf $MGMT_DIR $TSDB_DIR
rm -rf $TSDB_DIR
rm -rf $MGMT_DIR
fi
if [ "$EXEC_OPTON" = "start" ]; then
echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR
if [ "$SHELL_OPTION" = "true" ]; then
nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 5 > /dev/null 2>&1 &
fi
else
#relative path
RCFG_DIR=sim/$NODE_NAME/cfg
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
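# Keep signalling any taosd started with this node's cfg dir until no matching process remains;
# -x SIGINT requests a graceful stop, otherwise SIGKILL is used.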
while [ -n "$PID" ]
do
if [ "$SIGNAL" = "SIGINT" ]; then
echo try to kill by signal SIGINT
kill -SIGINT $PID
else
echo try to kill by signal SIGKILL
kill -9 $PID
fi
sleep 1
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
done
fi
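# Usage sketch (the exact installed name/path of this script is not shown in this diff;
# the .sim cases invoke such helpers as "system sh/exec.sh -n <node> -s start|stop"):
#   sh/<this-script>.sh -n dnode1 -s start             # start taosd for node dnode1
#   sh/<this-script>.sh -n dnode1 -s start -t          # start taosd under valgrind
#   sh/<this-script>.sh -n dnode1 -s stop -x SIGINT    # stop with SIGINT instead of SIGKILL
#   sh/<this-script>.sh -n dnode1 -s start -c          # wipe mgmt/tsdb data, then start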

View File

@ -25,6 +25,12 @@ system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20 system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 20 system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode1 -c http -v 1 system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 1 system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1 system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode4 -c http -v 1

View File

@ -161,7 +161,7 @@ sleep 10000
$loopCnt = 0 $loopCnt = 0
wait_dnode_offline_overtime_dropped: wait_dnode_offline_overtime_dropped:
$loopCnt = $loopCnt + 1 $loopCnt = $loopCnt + 1
if $loopCnt == 10 then if $loopCnt == 20 then
return -1 return -1
endi endi
sql show dnodes sql show dnodes

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -56,11 +62,10 @@ sql create dnode $hostname3
sql create dnode $hostname4 sql create dnode $hostname4
sleep 3000 sleep 3000
$totalTableNum = 10
$sleepTimer = 3000 $sleepTimer = 3000
$db = db $db = db
sql create database $db replica 3 maxTables $totalTableNum sql create database $db replica 3
sql use $db sql use $db
# create table , insert data # create table , insert data

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -54,11 +60,10 @@ sql create dnode $hostname2
sql create dnode $hostname3 sql create dnode $hostname3
sleep 3000 sleep 3000
$totalTableNum = 10
$sleepTimer = 3000 $sleepTimer = 3000
$db = db $db = db
sql create database $db replica 2 maxTables $totalTableNum sql create database $db replica 2
sql use $db sql use $db
# create table , insert data # create table , insert data
@ -137,8 +142,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2 $dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2 $dnode2Vtatus = $data9_2
if $dnode3Vtatus != offline then if $dnode3Vtatus != offline then
sleep 2000 sleep 2000
@ -204,8 +209,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2 $dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2 $dnode2Vtatus = $data9_2
print dnode2Vtatus: $dnode2Vtatus print dnode2Vtatus: $dnode2Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus
@ -319,8 +324,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2 $dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2 $dnode2Vtatus = $data9_2
print dnode4Vtatus: $dnode4Vtatus print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -56,11 +62,10 @@ sql create dnode $hostname3
sql create dnode $hostname4 sql create dnode $hostname4
sleep 3000 sleep 3000
$totalTableNum = 10
$sleepTimer = 3000 $sleepTimer = 3000
$db = db $db = db
sql create database $db replica 3 maxTables $totalTableNum sql create database $db replica 3
sql use $db sql use $db
# create table , insert data # create table , insert data
@ -139,8 +144,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2 $dnode4Vtatus = $data6_2
$dnode3Vtatus = $data7_2 $dnode3Vtatus = $data9_2
if $dnode4Vtatus != offline then if $dnode4Vtatus != offline then
sleep 2000 sleep 2000
@ -206,8 +211,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2 $dnode4Vtatus = $data6_2
$dnode3Vtatus = $data7_2 $dnode3Vtatus = $data9_2
print dnode4Vtatus: $dnode4Vtatus print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus
@ -325,8 +330,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2 $dnode4Vtatus = $data6_2
$dnode3Vtatus = $data7_2 $dnode3Vtatus = $data9_2
print dnode4Vtatus: $dnode4Vtatus print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus
@ -386,8 +391,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2 $dnode4Vtatus = $data6_2
$dnode3Vtatus = $data7_2 $dnode3Vtatus = $data9_2
print dnode4Vtatus: $dnode4Vtatus print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus

View File

@ -23,6 +23,12 @@ system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10 system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode5 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
@ -215,6 +221,7 @@ system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10 system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1 system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 16
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3 sql create dnode $hostname3
@ -322,7 +329,7 @@ $totalRows = 0
$tsStart = 1420041600000 $tsStart = 1420041600000
$db = db1 $db = db1
sql create database $db replica 2 maxTables 4 sql create database $db replica 2
sql use $db sql use $db
$stb = stb $stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int) sql create table $stb (ts timestamp, c1 int) tags(t1 int)

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -52,11 +58,10 @@ system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2 sql create dnode $hostname2
sleep 3000 sleep 3000
$totalTableNum = 10
$sleepTimer = 10000 $sleepTimer = 10000
$db = db $db = db
sql create database $db replica 1 maxTables $totalTableNum sql create database $db replica 1
sql use $db sql use $db
# create table , insert data # create table , insert data
@ -352,16 +357,16 @@ if $loopCnt == 10 then
endi endi
sql show vgroups sql show vgroups
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 $data10_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 $data10_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 $data10_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4 $data10_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 #print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 #print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$thirdDnode_2 = $data8_1 $thirdDnode_2 = $data10_1
$thirdDnode_3 = $data8_2 $thirdDnode_3 = $data10_2
$thirdDnode_4 = $data8_3 $thirdDnode_4 = $data10_3
$thirdDnode_5 = $data8_4 $thirdDnode_5 = $data10_4
if $thirdDnode_2 != null then if $thirdDnode_2 != null then
sleep 2000 sleep 2000
@ -405,10 +410,10 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 #print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 #print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$sencodDnode_2 = $data5_1 $sencodDnode_2 = $data7_1
$sencodDnode_3 = $data5_2 $sencodDnode_3 = $data7_2
$sencodDnode_4 = $data5_3 $sencodDnode_4 = $data7_3
$sencodDnode_5 = $data5_4 $sencodDnode_5 = $data7_4
if $sencodDnode_2 != null then if $sencodDnode_2 != null then
sleep 2000 sleep 2000

View File

@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2 system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2 system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 $totalTableNum = 10
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
@ -60,7 +66,7 @@ $totalTableNum = 10
$sleepTimer = 10000 $sleepTimer = 10000
$db = db $db = db
sql create database $db replica 2 maxTables $totalTableNum sql create database $db replica 2
sql use $db sql use $db
# create table , insert data # create table , insert data

Some files were not shown because too many files have changed in this diff.