Merge branch 'develop' into feature/query

commit 233ecbe9cd

.travis.yml (16 changed lines)

@@ -13,7 +13,7 @@ branches:
matrix:
- os: linux
dist: bionic
dist: focal
language: c

git:

@@ -28,8 +28,6 @@ matrix:
- build-essential
- cmake
- net-tools
- python-pip
- python-setuptools
- python3-pip
- python3-setuptools
- valgrind

@@ -54,13 +52,19 @@ matrix:
cd ${TRAVIS_BUILD_DIR}/debug
make install > /dev/null || travis_terminate $?

pip install numpy
pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
pip3 install numpy
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

cd ${TRAVIS_BUILD_DIR}/tests
./test-all.sh smoke || travis_terminate $?
sleep 1

cd ${TRAVIS_BUILD_DIR}/tests/pytest
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
./crash_gen.sh -a -p -t 4 -s 25|| travis_terminate $?
sleep 1

cd ${TRAVIS_BUILD_DIR}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log

@@ -160,7 +164,7 @@ matrix:

script:
- cmake .. > /dev/null
- make > /dev/null
- make

- os: linux
dist: bionic

@@ -320,6 +320,8 @@ typedef struct SSqlStream {
SSqlObj *pSql;
uint32_t streamId;
char listed;
bool isProject;
int16_t precision;
int64_t num; // number of computing count

/*

@@ -334,7 +336,6 @@ typedef struct SSqlStream {
int64_t etime; // stream end query time, when time is larger than etime, the stream will be closed
int64_t interval;
int64_t slidingTime;
int16_t precision;
void * pTimer;

void (*fp)();

@ -274,6 +274,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
pReducer->numOfBuffer = idx;
|
||||
|
||||
SCompareParam *param = malloc(sizeof(SCompareParam));
|
||||
if (param == NULL) {
|
||||
tfree(pReducer);
|
||||
return;
|
||||
}
|
||||
param->pLocalData = pReducer->pLocalDataSrc;
|
||||
param->pDesc = pReducer->pDesc;
|
||||
param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
|
||||
|
@ -284,6 +288,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
|
||||
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
|
||||
if (pReducer->pLoserTree == NULL || pRes->code != 0) {
|
||||
tfree(param);
|
||||
tfree(pReducer);
|
||||
return;
|
||||
}
|
||||
|
@ -332,6 +337,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
tfree(pReducer->pResultBuf);
|
||||
tfree(pReducer->pFinalRes);
|
||||
tfree(pReducer->prevRowOfInput);
|
||||
tfree(pReducer->pLoserTree);
|
||||
tfree(param);
|
||||
tfree(pReducer);
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return;
|
||||
|
|
|
@ -1310,6 +1310,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql);
|
||||
}
|
||||
|
||||
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
|
||||
if (TSDB_CODE_SUCCESS != ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (tscIsInsertData(pSql->sqlstr)) {
|
||||
/*
|
||||
* Set the fp before parse the sql string, in case of getTableMeta failed, in which
|
||||
|
@ -1326,11 +1331,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
|
||||
ret = tsParseInsertSql(pSql);
|
||||
} else {
|
||||
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
|
||||
if (TSDB_CODE_SUCCESS != ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
|
||||
ret = tscToSQLCmd(pSql, &SQLInfo);
|
||||
SQLInfoDestroy(&SQLInfo);
|
||||
|
|
|
@ -71,6 +71,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
|
|||
|
||||
pSql->fp = tscProcessStreamQueryCallback;
|
||||
pSql->param = pStream;
|
||||
pSql->res.completed = false;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
|
|||
// failed to get meter/metric meta, retry in 10sec.
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
|
||||
tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
|
||||
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
|
||||
tscSetRetryTimer(pStream, pSql, retryDelayTime);
|
||||
|
||||
} else {
|
||||
|
@ -108,7 +109,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
tscDebug("%p add into timer", pSql);
|
||||
|
||||
if (isProjectStream(pQueryInfo)) {
|
||||
if (pStream->isProject) {
|
||||
/*
|
||||
* pQueryInfo->window.ekey, which is the start time, does not change in case of
|
||||
* repeat first execution, once the first execution failed.
|
||||
|
@ -121,7 +122,19 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
}
|
||||
} else {
|
||||
pQueryInfo->window.skey = pStream->stime - pStream->interval;
|
||||
pQueryInfo->window.ekey = pStream->stime - 1;
|
||||
int64_t etime = taosGetTimestamp(pStream->precision);
|
||||
// delay to wait for all data in the last time window
|
||||
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
etime -= tsMaxStreamComputDelay * 1000l;
|
||||
} else {
|
||||
etime -= tsMaxStreamComputDelay;
|
||||
}
|
||||
if (etime > pStream->etime) {
|
||||
etime = pStream->etime;
|
||||
} else {
|
||||
etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval;
|
||||
}
|
||||
pQueryInfo->window.ekey = etime;
|
||||
}
|
||||
|
||||
// launch stream computing in a new thread
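(For reference, a minimal standalone sketch of the window-end clamping added in this hunk; streamWindowEnd, its struct-free signature, and the 20000 ms delay constant are illustrative assumptions, not the real TDengine definitions.)

    #include <stdint.h>
    #include <stdio.h>

    #define PRECISION_MILLI 0
    #define PRECISION_MICRO 1
    static const int64_t maxStreamComputDelay = 20000;  /* ms, assumed value */

    /* Clamp the window end so the stream never queries data that may still be arriving. */
    static int64_t streamWindowEnd(int64_t now, int64_t stime, int64_t etime,
                                   int64_t interval, int precision) {
      int64_t end = now - (precision == PRECISION_MICRO ? maxStreamComputDelay * 1000l
                                                        : maxStreamComputDelay);
      if (end > etime) {
        end = etime;                                        /* never run past the stream end time */
      } else {
        end = stime + (end - stime) / interval * interval;  /* align to an interval boundary */
      }
      return end;
    }

    int main(void) {
      /* window start 0, stream end 100000, 10000 ms interval, millisecond precision -> 70000 */
      printf("%lld\n", (long long)streamWindowEnd(95000, 0, 100000, 10000, PRECISION_MILLI));
      return 0;
    }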
|
||||
|
@ -137,7 +150,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
|||
SSqlStream *pStream = (SSqlStream *)param;
|
||||
if (tres == NULL || numOfRows < 0) {
|
||||
int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
|
||||
tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
|
||||
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
|
||||
retryDelay);
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
|
||||
|
@ -151,17 +164,45 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
|||
taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param);
|
||||
}
|
||||
|
||||
static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
int64_t timestamp = *(int64_t *)pRes->data;
|
||||
int64_t actualTimestamp = pStream->stime - pStream->interval;
|
||||
|
||||
if (timestamp != actualTimestamp) {
|
||||
// reset the timestamp of each agg point by using start time of each interval
|
||||
*((int64_t *)pRes->data) = actualTimestamp;
|
||||
tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64, pSql, pStream, timestamp, actualTimestamp);
|
||||
// no need to be called as this is already done in the query
|
||||
static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) {
|
||||
#if 0
|
||||
SSqlObj * pSql = pStream->pSql;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_SET_VALUE && pQueryInfo->fillType != TSDB_FILL_NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
/* failed to retrieve any result in this retrieve */
|
||||
pSql->res.numOfRows = 1;
|
||||
void *row[TSDB_MAX_COLUMNS] = {0};
|
||||
char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0};
|
||||
void *oldPtr = pSql->res.data;
|
||||
pSql->res.data = tmpRes;
|
||||
int32_t rowNum = 0;
|
||||
|
||||
while (pStream->stime + pStream->slidingTime < ts) {
|
||||
pStream->stime += pStream->slidingTime;
|
||||
*(TSKEY*)row[0] = pStream->stime;
|
||||
for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type);
|
||||
row[i] = pSql->res.data + offset;
|
||||
}
|
||||
(*pStream->fp)(pStream->param, pSql, row);
|
||||
++rowNum;
|
||||
}
|
||||
|
||||
if (rowNum > 0) {
|
||||
tscDebug("%p stream:%p %d rows padded", pSql, pStream, rowNum);
|
||||
}
|
||||
|
||||
pRes->numOfRows = 0;
|
||||
pRes->data = oldPtr;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) {
|
||||
|
@ -170,7 +211,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
|
||||
if (pSql == NULL || numOfRows < 0) {
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
|
||||
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
|
||||
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
|
||||
|
||||
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
|
||||
return;
|
||||
|
@ -180,16 +221,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
|
||||
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
|
||||
pStream->numOfRes += numOfRows;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||
TAOS_ROW row = taos_fetch_row(res);
|
||||
tscDebug("%p stream:%p fetch result", pSql, pStream);
|
||||
if (isProjectStream(pQueryInfo)) {
|
||||
pStream->stime = *(TSKEY *)row[0];
|
||||
} else {
|
||||
tscSetTimestampForRes(pStream, pSql);
|
||||
}
|
||||
tscStreamFillTimeGap(pStream, *(TSKEY*)row[0]);
|
||||
pStream->stime = *(TSKEY *)row[0];
|
||||
|
||||
// user callback function
|
||||
(*pStream->fp)(pStream->param, res, row);
|
||||
|
@ -199,55 +235,18 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
|
||||
} else { // numOfRows == 0, all data has been retrieved
|
||||
pStream->useconds += pSql->res.useconds;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
if (pStream->numOfRes == 0) {
|
||||
if (pQueryInfo->fillType == TSDB_FILL_SET_VALUE || pQueryInfo->fillType == TSDB_FILL_NULL) {
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
/* failed to retrieve any result in this retrieve */
|
||||
pSql->res.numOfRows = 1;
|
||||
void *row[TSDB_MAX_COLUMNS] = {0};
|
||||
char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0};
|
||||
|
||||
void *oldPtr = pSql->res.data;
|
||||
pSql->res.data = tmpRes;
|
||||
|
||||
for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type);
|
||||
row[i] = pSql->res.data + offset;
|
||||
}
|
||||
|
||||
tscSetTimestampForRes(pStream, pSql);
|
||||
row[0] = pRes->data;
|
||||
|
||||
// char result[512] = {0};
|
||||
// taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutput);
|
||||
// tscInfo("%p stream:%p query result: %s", pSql, pStream, result);
|
||||
tscDebug("%p stream:%p fetch result", pSql, pStream);
|
||||
|
||||
// user callback function
|
||||
(*pStream->fp)(pStream->param, res, row);
|
||||
|
||||
pRes->numOfRows = 0;
|
||||
pRes->data = oldPtr;
|
||||
} else if (isProjectStream(pQueryInfo)) {
|
||||
if (pStream->isProject) {
|
||||
/* no results in the query range, retry */
|
||||
// todo set retry dynamic time
|
||||
int32_t retry = tsProjectExecInterval;
|
||||
tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry);
|
||||
tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry);
|
||||
|
||||
tscSetRetryTimer(pStream, pStream->pSql, retry);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (isProjectStream(pQueryInfo)) {
|
||||
pStream->stime += 1;
|
||||
}
|
||||
} else if (pStream->isProject) {
|
||||
pStream->stime += 1;
|
||||
}
|
||||
|
||||
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
|
||||
|
@ -256,16 +255,18 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
// release the metric/meter meta information reference, so data in cache can be updated
|
||||
|
||||
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
|
||||
tscFreeSqlResult(pSql);
|
||||
tfree(pSql->pSubs);
|
||||
pSql->numOfSubs = 0;
|
||||
tfree(pTableMetaInfo->vgroupList);
|
||||
tscSetNextLaunchTimer(pStream, pSql);
|
||||
}
|
||||
}
|
||||
|
||||
static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer);
|
||||
|
||||
if (isProjectStream(pQueryInfo)) {
|
||||
if (pStream->isProject) {
|
||||
int64_t now = taosGetTimestamp(pStream->precision);
|
||||
int64_t etime = now > pStream->etime ? pStream->etime : now;
|
||||
|
||||
|
@ -323,8 +324,7 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
|
|||
static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
||||
int64_t timer = 0;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
if (isProjectStream(pQueryInfo)) {
|
||||
if (pStream->isProject) {
|
||||
/*
|
||||
* for project query, no matter whether data is fetched successfully or not, the next launch will be delayed
|
||||
* more than the sliding time window
|
||||
|
@ -342,7 +342,6 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
|||
return;
|
||||
}
|
||||
} else {
|
||||
pStream->stime += pStream->slidingTime;
|
||||
if ((pStream->stime - pStream->interval) >= pStream->etime) {
|
||||
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
|
||||
pStream->stime, pStream->etime);
|
||||
|
@ -409,14 +408,16 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
|
||||
pStream->slidingTime = pQueryInfo->slidingTime;
|
||||
|
||||
pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor
|
||||
pQueryInfo->slidingTime = 0;
|
||||
if (pStream->isProject) {
|
||||
pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor
|
||||
pQueryInfo->slidingTime = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
if (isProjectStream(pQueryInfo)) {
|
||||
if (pStream->isProject) {
|
||||
// no data in table, flush all data till now to destination meter, 10sec delay
|
||||
pStream->interval = tsProjectExecInterval;
|
||||
pStream->slidingTime = tsProjectExecInterval;
|
||||
|
@ -489,7 +490,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
|||
|
||||
SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
|
||||
if (pStream == NULL) {
|
||||
tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code);
|
||||
tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
|
||||
tscFreeSqlObj(pSql);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -514,7 +515,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
|||
if (pRes->code != TSDB_CODE_SUCCESS) {
|
||||
setErrorInfo(pSql, pRes->code, pCmd->payload);
|
||||
|
||||
tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code);
|
||||
tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
|
||||
tscFreeSqlObj(pSql);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -523,6 +524,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
|||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
|
||||
pStream->isProject = isProjectStream(pQueryInfo);
|
||||
pStream->fp = fp;
|
||||
pStream->callback = callback;
|
||||
pStream->param = param;
|
||||
|
@ -565,6 +567,8 @@ void taos_close_stream(TAOS_STREAM *handle) {
|
|||
taosTmrStopA(&(pStream->pTimer));
|
||||
|
||||
tscDebug("%p stream:%p is closed", pSql, pStream);
|
||||
// notify CQ to release the pStream object
|
||||
pStream->fp(pStream->param, NULL, NULL);
|
||||
|
||||
tscFreeSqlObj(pSql);
|
||||
pStream->pSql = NULL;
|
||||
|
|
|
@ -1453,9 +1453,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
|||
static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
|
||||
tscDebug("%p start to free subquery result", pSql);
|
||||
|
||||
if (pSql->res.code == TSDB_CODE_SUCCESS) {
|
||||
taos_free_result(pSql);
|
||||
}
|
||||
taos_free_result(pSql);
|
||||
|
||||
tfree(trsupport->localBuffer);
|
||||
|
||||
|
|
|
@ -1820,7 +1820,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta);
|
||||
|
||||
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
|
||||
pPrevInfo->vgroupList = NULL;
|
||||
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
|
||||
}
|
||||
|
||||
|
|
|
@ -69,6 +69,7 @@ extern int64_t tsMaxRetentWindow;
|
|||
extern int32_t tsCacheBlockSize;
|
||||
extern int32_t tsBlocksPerVnode;
|
||||
extern int32_t tsMaxTablePerVnode;
|
||||
extern int32_t tsMaxVgroupsPerDb;
|
||||
extern int16_t tsDaysPerFile;
|
||||
extern int32_t tsDaysToKeep;
|
||||
extern int32_t tsMinRowsInFileBlock;
|
||||
|
|
|
@ -111,13 +111,8 @@ int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
|
|||
int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
|
||||
int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
|
||||
int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM;
|
||||
|
||||
#ifdef _TD_ARM_32_
|
||||
int32_t tsMaxTablePerVnode = 100;
|
||||
#else
|
||||
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
|
||||
#endif
|
||||
|
||||
int32_t tsMaxVgroupsPerDb = 0;
|
||||
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
|
||||
// balance
|
||||
int32_t tsEnableBalance = 1;
|
||||
int32_t tsAlternativeRole = 0;
|
||||
|
@ -129,7 +124,7 @@ int32_t tsMnodeEqualVnodeNum = 4;
|
|||
int32_t tsEnableHttpModule = 1;
|
||||
int32_t tsRestRowLimit = 10240;
|
||||
uint16_t tsHttpPort = 6020; // only tcp, range tcp[6020]
|
||||
int32_t tsHttpCacheSessions = 100;
|
||||
int32_t tsHttpCacheSessions = 1000;
|
||||
int32_t tsHttpSessionExpire = 36000;
|
||||
int32_t tsHttpMaxThreads = 2;
|
||||
int32_t tsHttpEnableCompress = 0;
|
||||
|
@ -606,8 +601,18 @@ static void doInitGlobalConfig() {
|
|||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "maxVgroupsPerDb";
|
||||
cfg.ptr = &tsMaxVgroupsPerDb;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 8192;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
// database configs
|
||||
cfg.option = "maxtablesPerVnode";
|
||||
cfg.option = "maxTablesPerVnode";
|
||||
cfg.ptr = &tsMaxTablePerVnode;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
|
||||
|
|
|
@ -1,44 +0,0 @@
|
|||
#!/bin/bash
|
||||
ulimit -c unlimited
|
||||
|
||||
function buildTDengine {
|
||||
cd /root/TDengine
|
||||
|
||||
git remote update
|
||||
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
|
||||
echo " LOCAL: $LOCAL_COMMIT"
|
||||
echo "REMOTE: $REMOTE_COMMIT"
|
||||
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
|
||||
echo "repo up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
git pull
|
||||
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
cd /root/TDengine/debug
|
||||
rm -rf /root/TDengine/debug/*
|
||||
cmake ..
|
||||
make > /dev/null
|
||||
make install
|
||||
fi
|
||||
}
|
||||
|
||||
function restartTaosd {
|
||||
systemctl stop taosd
|
||||
pkill -KILL -x taosd
|
||||
sleep 10
|
||||
|
||||
logDir=`grep 'logDir' /etc/taos/taos.cfg|awk 'END{print $2}'`
|
||||
dataDir=`grep 'dataDir' /etc/taos/taos.cfg|awk '{print $2}'`
|
||||
|
||||
rm -rf $logDir/*
|
||||
rm -rf $dataDir/*
|
||||
|
||||
taosd 2>&1 > /dev/null &
|
||||
sleep 10
|
||||
}
|
||||
|
||||
buildTDengine
|
||||
restartTaosd
|
|
@ -0,0 +1,236 @@
|
|||
package com.taosdata.jdbc.utils;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
public class TDNode {
|
||||
|
||||
private int index;
|
||||
private int running;
|
||||
private int deployed;
|
||||
private boolean testCluster;
|
||||
private String path;
|
||||
private String cfgDir;
|
||||
private String dataDir;
|
||||
private String logDir;
|
||||
private String cfgPath;
|
||||
|
||||
public TDNode(int index) {
|
||||
this.index = index;
|
||||
running = 0;
|
||||
deployed = 0;
|
||||
testCluster = false;
|
||||
}
|
||||
|
||||
public void setPath(String path) {
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
public void setTestCluster(boolean testCluster) {
|
||||
this.testCluster = testCluster;
|
||||
}
|
||||
|
||||
public void searchTaosd(File dir, ArrayList<String> taosdPath) {
|
||||
File[] fileList = dir.listFiles();
|
||||
|
||||
if(fileList == null || fileList.length == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
for(File file : fileList) {
|
||||
if(file.isFile()) {
|
||||
if(file.getName().equals("taosd")) {
|
||||
taosdPath.add(file.getAbsolutePath());
|
||||
}
|
||||
} else {
|
||||
searchTaosd(file, taosdPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void start() {
|
||||
String selfPath = System.getProperty("user.dir");
|
||||
String binPath = "";
|
||||
String projDir = selfPath + "/../../../";
|
||||
|
||||
try {
|
||||
ArrayList<String> taosdPath = new ArrayList<>();
|
||||
|
||||
File dir = new File(projDir);
|
||||
String realProjDir = dir.getCanonicalPath();
|
||||
dir = new File(realProjDir);
|
||||
System.out.println("project Dir: " + projDir);
|
||||
searchTaosd(dir, taosdPath);
|
||||
|
||||
if(taosdPath.size() == 0) {
|
||||
System.out.println("The project path doens't exist");
|
||||
return;
|
||||
} else {
|
||||
for(String p : taosdPath) {
|
||||
if(!p.contains("packaging")) {
|
||||
binPath = p;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
if(binPath.isEmpty()) {
|
||||
System.out.println("taosd not found");
|
||||
return;
|
||||
} else {
|
||||
System.out.println("taosd found in " + binPath);
|
||||
}
|
||||
|
||||
if(this.deployed == 0) {
|
||||
System.out.println("dnode" + index + "is not deployed");
|
||||
return;
|
||||
}
|
||||
|
||||
String cmd = "nohup " + binPath + " -c " + cfgDir + " > /dev/null 2>&1 & ";
|
||||
System.out.println("start taosd cmd: " + cmd);
|
||||
|
||||
try{
|
||||
Runtime.getRuntime().exec(cmd);
|
||||
TimeUnit.SECONDS.sleep(5);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
this.running = 1;
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
String toBeKilled = "taosd";
|
||||
|
||||
if (this.running != 0) {
|
||||
String killCmd = "pkill -kill -x " + toBeKilled;
|
||||
String[] killCmds = {"sh", "-c", killCmd};
|
||||
try {
|
||||
Runtime.getRuntime().exec(killCmds).waitFor();
|
||||
|
||||
for(int port = 6030; port < 6041; port ++) {
|
||||
String fuserCmd = "fuser -k -n tcp " + port;
|
||||
Runtime.getRuntime().exec(fuserCmd).waitFor();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
this.running = 0;
|
||||
System.out.println("dnode:" + this.index + " is stopped by pkill");
|
||||
}
|
||||
}
|
||||
|
||||
public void startIP() {
|
||||
try{
|
||||
String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " up";
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
public void stopIP() {
|
||||
try{
|
||||
String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " down";
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
public void setCfgConfig(String option, String value) {
|
||||
try{
|
||||
String cmd = "echo " + option + " " + value + " >> " + this.cfgPath;
|
||||
String[] cmdLine = {"sh", "-c", cmd};
|
||||
Process ps = Runtime.getRuntime().exec(cmdLine);
|
||||
ps.waitFor();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
public String getDnodeRootDir() {
|
||||
String dnodeRootDir = this.path + "/sim/psim/dnode" + this.index;
|
||||
return dnodeRootDir;
|
||||
}
|
||||
|
||||
public String getDnodesRootDir() {
|
||||
String dnodesRootDir = this.path + "/sim/psim" + this.index;
|
||||
return dnodesRootDir;
|
||||
}
|
||||
|
||||
public void deploy() {
|
||||
this.logDir = this.path + "/sim/dnode" + this.index + "/log";
|
||||
this.dataDir = this.path + "/sim/dnode" + this.index + "/data";
|
||||
this.cfgDir = this.path + "/sim/dnode" + this.index + "/cfg";
|
||||
this.cfgPath = this.path + "/sim/dnode" + this.index + "/cfg/taos.cfg";
|
||||
|
||||
try {
|
||||
String cmd = "rm -rf " + this.logDir;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
|
||||
cmd = "rm -rf " + this.cfgDir;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
|
||||
cmd = "rm -rf " + this.dataDir;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
|
||||
cmd = "mkdir -p " + this.logDir;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
|
||||
cmd = "mkdir -p " + this.cfgDir;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
|
||||
cmd = "mkdir -p " + this.dataDir;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
|
||||
cmd = "touch " + this.cfgPath;
|
||||
Runtime.getRuntime().exec(cmd).waitFor();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
if(this.testCluster) {
|
||||
startIP();
|
||||
setCfgConfig("masterIp", "192.168.0.1");
|
||||
setCfgConfig("secondIp", "192.168.0.2");
|
||||
setCfgConfig("publicIp", "192.168.0." + this.index);
|
||||
setCfgConfig("internalIp", "192.168.0." + this.index);
|
||||
setCfgConfig("privateIp", "192.168.0." + this.index);
|
||||
}
|
||||
setCfgConfig("dataDir", this.dataDir);
|
||||
setCfgConfig("logDir", this.logDir);
|
||||
setCfgConfig("numOfLogLines", "1000000/00");
|
||||
setCfgConfig("mnodeEqualVnodeNum", "0");
|
||||
setCfgConfig("walLevel", "1");
|
||||
setCfgConfig("statusInterval", "1");
|
||||
setCfgConfig("numOfTotalVnodes", "64");
|
||||
setCfgConfig("numOfMnodes", "3");
|
||||
setCfgConfig("numOfThreadsPerCore", "2.0");
|
||||
setCfgConfig("monitor", "0");
|
||||
setCfgConfig("maxVnodeConnections", "30000");
|
||||
setCfgConfig("maxMgmtConnections", "30000");
|
||||
setCfgConfig("maxMeterConnections", "30000");
|
||||
setCfgConfig("maxShellConns", "30000");
|
||||
setCfgConfig("locale", "en_US.UTF-8");
|
||||
setCfgConfig("charset", "UTF-8");
|
||||
setCfgConfig("asyncLog", "0");
|
||||
setCfgConfig("anyIp", "0");
|
||||
setCfgConfig("dDebugFlag", "135");
|
||||
setCfgConfig("mDebugFlag", "135");
|
||||
setCfgConfig("sdbDebugFlag", "135");
|
||||
setCfgConfig("rpcDebugFlag", "135");
|
||||
setCfgConfig("tmrDebugFlag", "131");
|
||||
setCfgConfig("cDebugFlag", "135");
|
||||
setCfgConfig("httpDebugFlag", "135");
|
||||
setCfgConfig("monitorDebugFlag", "135");
|
||||
setCfgConfig("udebugFlag", "135");
|
||||
setCfgConfig("jnidebugFlag", "135");
|
||||
setCfgConfig("qdebugFlag", "135");
|
||||
this.deployed = 1;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
package com.taosdata.jdbc.utils;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.*;
|
||||
|
||||
public class TDNodes {
|
||||
private ArrayList<TDNode> tdNodes;
|
||||
private boolean testCluster;
|
||||
|
||||
public TDNodes () {
|
||||
tdNodes = new ArrayList<>();
|
||||
for(int i = 1; i < 11; i ++) {
|
||||
tdNodes.add(new TDNode(i));
|
||||
}
|
||||
}
|
||||
|
||||
public void setPath(String path) {
|
||||
try {
|
||||
String killCmd = "pkill -kill -x taosd";
|
||||
String[] killCmds = {"sh", "-c", killCmd};
|
||||
Runtime.getRuntime().exec(killCmds).waitFor();
|
||||
|
||||
String binPath = System.getProperty("user.dir");
|
||||
binPath += "/../../../debug";
|
||||
System.out.println("binPath: " + binPath);
|
||||
|
||||
File file = new File(path);
|
||||
binPath = file.getCanonicalPath();
|
||||
System.out.println("binPath real path: " + binPath);
|
||||
|
||||
if(path.isEmpty()){
|
||||
file = new File(path + "/../../");
|
||||
path = file.getCanonicalPath();
|
||||
}
|
||||
|
||||
for(int i = 0; i < tdNodes.size(); i++) {
|
||||
tdNodes.get(i).setPath(path);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
public void setTestCluster(boolean testCluster) {
|
||||
this.testCluster = testCluster;
|
||||
}
|
||||
|
||||
public void check(int index) {
|
||||
if(index < 1 || index > 10) {
|
||||
System.out.println("index: " + index + " should on a scale of [1, 10]");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
public void deploy(int index) {
|
||||
try {
|
||||
File file = new File(System.getProperty("user.dir") + "/../../../");
|
||||
String projectRealPath = file.getCanonicalPath();
|
||||
check(index);
|
||||
tdNodes.get(index - 1).setTestCluster(this.testCluster);
|
||||
tdNodes.get(index - 1).setPath(projectRealPath);
|
||||
tdNodes.get(index - 1).deploy();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
System.out.println("deploy Test Exception");
|
||||
}
|
||||
}
|
||||
|
||||
public void cfg(int index, String option, String value) {
|
||||
check(index);
|
||||
tdNodes.get(index - 1).setCfgConfig(option, value);
|
||||
}
|
||||
|
||||
public void start(int index) {
|
||||
check(index);
|
||||
tdNodes.get(index - 1).start();
|
||||
}
|
||||
|
||||
public void stop(int index) {
|
||||
check(index);
|
||||
tdNodes.get(index - 1).stop();
|
||||
}
|
||||
|
||||
public void startIP(int index) {
|
||||
check(index);
|
||||
tdNodes.get(index - 1).startIP();
|
||||
}
|
||||
|
||||
public void stopIP(int index) {
|
||||
check(index);
|
||||
tdNodes.get(index - 1).stopIP();
|
||||
}
|
||||
|
||||
}
|
|
@ -1,27 +1,38 @@
|
|||
package com.taosdata.jdbc;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.File;
|
||||
import com.taosdata.jdbc.utils.TDNodes;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
public class BaseTest {
|
||||
|
||||
private static boolean testCluster = false;
|
||||
private static String deployPath = System.getProperty("user.dir");
|
||||
private static TDNodes tdNodes = new TDNodes();
|
||||
|
||||
|
||||
@BeforeClass
|
||||
public static void setupEnv() {
|
||||
try{
|
||||
String path = System.getProperty("user.dir");
|
||||
String bashPath = path + "/buildTDengine.sh";
|
||||
File file = new File(deployPath + "/../../../");
|
||||
String rootPath = file.getCanonicalPath();
|
||||
|
||||
tdNodes.setPath(rootPath);
|
||||
tdNodes.setTestCluster(testCluster);
|
||||
|
||||
Process ps = Runtime.getRuntime().exec(bashPath);
|
||||
ps.waitFor();
|
||||
tdNodes.deploy(1);
|
||||
tdNodes.start(1);
|
||||
|
||||
BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream()));
// read each line once; calling readLine() twice per iteration would drop every other line
String line;
while ((line = br.readLine()) != null) {
System.out.println(line);
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
System.out.println("Base Test Exception");
|
||||
}
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void cleanUpEnv() {
|
||||
tdNodes.stop(1);
|
||||
}
|
||||
}
|
|
@ -109,6 +109,8 @@ void cqClose(void *handle) {
|
|||
while (pObj) {
|
||||
SCqObj *pTemp = pObj;
|
||||
pObj = pObj->next;
|
||||
tdFreeSchema(pTemp->pSchema);
|
||||
tfree(pTemp->sqlStr);
|
||||
free(pTemp);
|
||||
}
|
||||
|
||||
|
@ -242,6 +244,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
|
|||
|
||||
static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
|
||||
SCqObj *pObj = (SCqObj *)param;
|
||||
if (tres == NULL && row == NULL) {
|
||||
pObj->pStream = NULL;
|
||||
return;
|
||||
}
|
||||
SCqContext *pContext = pObj->pContext;
|
||||
STSchema *pSchema = pObj->pSchema;
|
||||
if (pObj->pStream == NULL) return;
|
||||
|
@ -263,8 +269,14 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
|
|||
void* val = row[i];
|
||||
if (val == NULL) {
|
||||
val = getNullValue(c->type);
|
||||
} else if (IS_VAR_DATA_TYPE(c->type)) {
|
||||
} else if (c->type == TSDB_DATA_TYPE_BINARY) {
|
||||
val = ((char*)val) - sizeof(VarDataLenT);
|
||||
} else if (c->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
char buf[TSDB_MAX_NCHAR_LEN];
|
||||
size_t len = taos_fetch_lengths(tres)[i];
|
||||
taosMbsToUcs4(val, len, buf, sizeof(buf), &len);
|
||||
memcpy(val + sizeof(VarDataLenT), buf, len);
|
||||
varDataLen(val) = len;
|
||||
}
|
||||
tdAppendColVal(trow, val, c->type, c->bytes, c->offset);
|
||||
}
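(A rough sketch of the length-prefixed variable-length cell that the BINARY/NCHAR branches above manipulate; VarLenT and writeVarCell are illustrative stand-ins, not the real VarDataLenT type or varData macros.)

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef uint16_t VarLenT;  /* stand-in for the 2-byte length header */

    /* A var-length cell is a length header immediately followed by the payload bytes.
     * This is why the diff rewinds the fetched pointer by sizeof(VarDataLenT): the
     * row pointer addresses the payload, while the writer needs the header. */
    static void writeVarCell(char *cell, const char *payload, VarLenT len) {
      memcpy(cell, &len, sizeof(len));            /* varDataLen(cell) = len */
      memcpy(cell + sizeof(len), payload, len);   /* varDataVal(cell) bytes */
    }

    int main(void) {
      char cell[64];
      writeVarCell(cell, "abc", 3);
      VarLenT len;
      memcpy(&len, cell, sizeof(len));
      printf("len=%u payload=%.*s\n", (unsigned)len, (int)len, cell + sizeof(len));
      return 0;
    }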
|
||||
|
|
|
@ -22,6 +22,8 @@ extern "C" {
|
|||
|
||||
int32_t dnodeInitMgmt();
|
||||
void dnodeCleanupMgmt();
|
||||
int32_t dnodeInitMgmtTimer();
|
||||
void dnodeCleanupMgmtTimer();
|
||||
void dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg);
|
||||
|
||||
void* dnodeGetVnode(int32_t vgId);
|
||||
|
|
|
@ -57,6 +57,7 @@ static const SDnodeComponent tsDnodeComponents[] = {
|
|||
{"server", dnodeInitServer, dnodeCleanupServer},
|
||||
{"mgmt", dnodeInitMgmt, dnodeCleanupMgmt},
|
||||
{"modules", dnodeInitModules, dnodeCleanupModules},
|
||||
{"mgmt-tmr",dnodeInitMgmtTimer, dnodeCleanupMgmtTimer},
|
||||
{"shell", dnodeInitShell, dnodeCleanupShell}
|
||||
};
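(A minimal sketch of the component-table pattern that tsDnodeComponents follows: init in declaration order, clean up in reverse; the component names and bodies below are placeholders, not the real dnode modules.)

    #include <stdio.h>

    typedef struct {
      const char *name;
      int  (*init)(void);
      void (*cleanup)(void);
    } Component;

    static int  initMgmt(void)     { printf("init mgmt\n");     return 0; }
    static void cleanMgmt(void)    { printf("cleanup mgmt\n");            }
    static int  initMgmtTmr(void)  { printf("init mgmt-tmr\n");  return 0; }
    static void cleanMgmtTmr(void) { printf("cleanup mgmt-tmr\n");         }

    static const Component comps[] = {
      {"mgmt",     initMgmt,    cleanMgmt},
      {"mgmt-tmr", initMgmtTmr, cleanMgmtTmr},
    };

    int main(void) {
      int n = (int)(sizeof(comps) / sizeof(comps[0]));
      int started = 0;
      for (; started < n; ++started) {
        if (comps[started].init() != 0) break;  /* stop at the first failing component */
      }
      for (int i = started - 1; i >= 0; --i) {
        comps[i].cleanup();                     /* tear down in reverse order */
      }
      return 0;
    }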
|
||||
|
||||
|
|
|
@ -147,6 +147,12 @@ int32_t dnodeInitMgmt() {
|
|||
return -1;
|
||||
}
|
||||
|
||||
dInfo("dnode mgmt is initialized");
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t dnodeInitMgmtTimer() {
|
||||
tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM");
|
||||
if (tsDnodeTmr == NULL) {
|
||||
dError("failed to init dnode timer");
|
||||
|
@ -155,13 +161,11 @@ int32_t dnodeInitMgmt() {
|
|||
}
|
||||
|
||||
taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer);
|
||||
|
||||
dInfo("dnode mgmt is initialized");
|
||||
|
||||
dInfo("dnode mgmt timer is initialized");
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void dnodeCleanupMgmt() {
|
||||
void dnodeCleanupMgmtTimer() {
|
||||
if (tsStatusTimer != NULL) {
|
||||
taosTmrStopA(&tsStatusTimer);
|
||||
tsStatusTimer = NULL;
|
||||
|
@ -171,7 +175,10 @@ void dnodeCleanupMgmt() {
|
|||
taosTmrCleanUp(tsDnodeTmr);
|
||||
tsDnodeTmr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void dnodeCleanupMgmt() {
|
||||
dnodeCleanupMgmtTimer();
|
||||
dnodeCloseVnodes();
|
||||
|
||||
if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset);
|
||||
|
@ -691,10 +698,12 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
pStatus->alternativeRole = (uint8_t) tsAlternativeRole;
|
||||
|
||||
// fill cluster cfg parameters
|
||||
pStatus->clusterCfg.numOfMnodes = tsNumOfMnodes;
|
||||
pStatus->clusterCfg.mnodeEqualVnodeNum = tsMnodeEqualVnodeNum;
|
||||
pStatus->clusterCfg.offlineThreshold = tsOfflineThreshold;
|
||||
pStatus->clusterCfg.statusInterval = tsStatusInterval;
|
||||
pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes);
|
||||
pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum);
|
||||
pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold);
|
||||
pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval);
|
||||
pStatus->clusterCfg.maxtablesPerVnode = htonl(tsMaxTablePerVnode);
|
||||
pStatus->clusterCfg.maxVgroupsPerDb = htonl(tsMaxVgroupsPerDb);
|
||||
strcpy(pStatus->clusterCfg.arbitrator, tsArbitrator);
|
||||
strcpy(pStatus->clusterCfg.timezone, tsTimezone);
|
||||
strcpy(pStatus->clusterCfg.locale, tsLocale);
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include "tglobal.h"
|
||||
#include "dnodeInt.h"
|
||||
#include "dnodeMain.h"
|
||||
#include "tfile.h"
|
||||
|
||||
static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context);
|
||||
static sem_t exitSem;
|
||||
|
@ -67,6 +68,18 @@ int32_t main(int32_t argc, char *argv[]) {
|
|||
taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#ifdef TAOS_RANDOM_FILE_FAIL
|
||||
else if (strcmp(argv[i], "--random-file-fail-factor") == 0) {
|
||||
if ( (i+1) < argc ) {
|
||||
int factor = atoi(argv[i+1]);
|
||||
printf("The factor of random failure is %d\n", factor);
|
||||
taosSetRandomFileFailFactor(factor);
|
||||
} else {
|
||||
printf("Please specify a number for random failure factor!");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -296,8 +296,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
|
|||
#define TSDB_DEFAULT_TOTAL_BLOCKS 4
|
||||
|
||||
#define TSDB_MIN_TABLES 4
|
||||
#define TSDB_MAX_TABLES 200000
|
||||
#define TSDB_DEFAULT_TABLES 1000
|
||||
#define TSDB_MAX_TABLES 5000000
|
||||
#define TSDB_DEFAULT_TABLES 200000
|
||||
#define TSDB_TABLES_STEP 10000
|
||||
|
||||
#define TSDB_MIN_DAYS_PER_FILE 1
|
||||
#define TSDB_MAX_DAYS_PER_FILE 3650
|
||||
|
|
|
@ -158,6 +158,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION, 0, 0x0382, "mnode inva
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, 0, 0x0383, "mnode invalid database")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MONITOR_DB_FORBIDDEN, 0, 0x0384, "mnode monitor db forbidden")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, 0, 0x0385, "mnode too many databases")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, 0, 0x0386, "mnode db in dropping")
|
||||
|
||||
// dnode
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "dnode message not processed")
|
||||
|
|
|
@ -569,6 +569,8 @@ typedef struct {
|
|||
char timezone[64]; // tsTimezone
|
||||
char locale[TSDB_LOCALE_LEN]; // tsLocale
|
||||
char charset[TSDB_LOCALE_LEN]; // tsCharset
|
||||
int32_t maxtablesPerVnode;
|
||||
int32_t maxVgroupsPerDb;
|
||||
} SClusterCfg;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -44,10 +44,7 @@ void mnodeRemoveSuperTableFromDb(SDbObj *pDb);
|
|||
void mnodeAddTableIntoDb(SDbObj *pDb);
|
||||
void mnodeRemoveTableFromDb(SDbObj *pDb);
|
||||
void mnodeAddVgroupIntoDb(SVgObj *pVgroup);
|
||||
void mnodeAddVgroupIntoDbTail(SVgObj *pVgroup);
|
||||
void mnodeRemoveVgroupFromDb(SVgObj *pVgroup);
|
||||
void mnodeMoveVgroupToTail(SVgObj *pVgroup);
|
||||
void mnodeMoveVgroupToHead(SVgObj *pVgroup);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -145,10 +145,8 @@ typedef struct SVgObj {
|
|||
int64_t totalStorage;
|
||||
int64_t compStorage;
|
||||
int64_t pointsWritten;
|
||||
struct SVgObj *prev, *next;
|
||||
struct SDbObj *pDb;
|
||||
void * idPool;
|
||||
SChildTableObj **tableList;
|
||||
} SVgObj;
|
||||
|
||||
typedef struct {
|
||||
|
@ -183,9 +181,11 @@ typedef struct SDbObj {
|
|||
int32_t numOfVgroups;
|
||||
int32_t numOfTables;
|
||||
int32_t numOfSuperTables;
|
||||
SVgObj *pHead;
|
||||
SVgObj *pTail;
|
||||
int32_t vgListSize;
|
||||
int32_t vgListIndex;
|
||||
SVgObj **vgList;
|
||||
struct SAcctObj *pAcct;
|
||||
pthread_mutex_t mutex;
|
||||
} SDbObj;
|
||||
|
||||
typedef struct SUserObj {
|
||||
|
@ -246,7 +246,8 @@ typedef struct {
|
|||
int16_t offset[TSDB_MAX_COLUMNS];
|
||||
int16_t bytes[TSDB_MAX_COLUMNS];
|
||||
int32_t numOfReads;
|
||||
int8_t reserved0[2];
|
||||
int8_t maxReplica;
|
||||
int8_t reserved0[0];
|
||||
uint16_t payloadLen;
|
||||
char payload[];
|
||||
} SShowObj;
|
||||
|
|
|
@ -40,6 +40,7 @@ char* mnodeGetDnodeStatusStr(int32_t dnodeStatus);
|
|||
void mgmtMonitorDnodeModule();
|
||||
|
||||
int32_t mnodeGetDnodesNum();
|
||||
int32_t mnodeGetOnlinDnodesCpuCoreNum();
|
||||
int32_t mnodeGetOnlinDnodesNum();
|
||||
void * mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode);
|
||||
void mnodeIncDnodeRef(SDnodeObj *pDnode);
|
||||
|
|
|
@ -30,17 +30,17 @@ void mnodeDecVgroupRef(SVgObj *pVgroup);
|
|||
void mnodeDropAllDbVgroups(SDbObj *pDropDb);
|
||||
void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb);
|
||||
void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode);
|
||||
void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
|
||||
//void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
|
||||
|
||||
void * mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup);
|
||||
void mnodeUpdateVgroup(SVgObj *pVgroup);
|
||||
void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload);
|
||||
void mnodeCheckUnCreatedVgroup(SDnodeObj *pDnode, SVnodeLoad *pVloads, int32_t openVnodes);
|
||||
|
||||
int32_t mnodeCreateVgroup(struct SMnodeMsg *pMsg, SDbObj *pDb);
|
||||
int32_t mnodeCreateVgroup(struct SMnodeMsg *pMsg);
|
||||
void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle);
|
||||
void mnodeAlterVgroup(SVgObj *pVgroup, void *ahandle);
|
||||
SVgObj *mnodeGetAvailableVgroup(SDbObj *pDb);
|
||||
int32_t mnodeGetAvailableVgroup(struct SMnodeMsg *pMsg, SVgObj **pVgroup, int32_t *sid);
|
||||
|
||||
void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
|
||||
void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include "mnodeUser.h"
|
||||
#include "mnodeVgroup.h"
|
||||
|
||||
#define VG_LIST_SIZE 8
|
||||
static void * tsDbSdb = NULL;
|
||||
static int32_t tsDbUpdateSize;
|
||||
|
||||
|
@ -50,8 +51,14 @@ static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg);
|
|||
static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg);
|
||||
static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg);
|
||||
|
||||
static void mnodeDestroyDb(SDbObj *pDb) {
|
||||
pthread_mutex_destroy(&pDb->mutex);
|
||||
tfree(pDb->vgList);
|
||||
tfree(pDb);
|
||||
}
|
||||
|
||||
static int32_t mnodeDbActionDestroy(SSdbOper *pOper) {
|
||||
tfree(pOper->pObj);
|
||||
mnodeDestroyDb(pOper->pObj);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -59,8 +66,9 @@ static int32_t mnodeDbActionInsert(SSdbOper *pOper) {
|
|||
SDbObj *pDb = pOper->pObj;
|
||||
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
|
||||
|
||||
pDb->pHead = NULL;
|
||||
pDb->pTail = NULL;
|
||||
pthread_mutex_init(&pDb->mutex, NULL);
|
||||
pDb->vgListSize = VG_LIST_SIZE;
|
||||
pDb->vgList = calloc(pDb->vgListSize, sizeof(SVgObj *));
|
||||
pDb->numOfVgroups = 0;
|
||||
pDb->numOfTables = 0;
|
||||
pDb->numOfSuperTables = 0;
|
||||
|
@ -94,14 +102,15 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
|
|||
}
|
||||
|
||||
static int32_t mnodeDbActionUpdate(SSdbOper *pOper) {
|
||||
SDbObj *pDb = pOper->pObj;
|
||||
SDbObj *pSaved = mnodeGetDb(pDb->name);
|
||||
if (pDb != pSaved) {
|
||||
memcpy(pSaved, pDb, pOper->rowSize);
|
||||
free(pDb);
|
||||
SDbObj *pNew = pOper->pObj;
|
||||
SDbObj *pDb = mnodeGetDb(pNew->name);
|
||||
if (pDb != NULL && pNew != pDb) {
|
||||
memcpy(pDb, pNew, pOper->rowSize);
|
||||
free(pNew->vgList);
|
||||
free(pNew);
|
||||
}
|
||||
mnodeUpdateAllDbVgroups(pSaved);
|
||||
mnodeDecDbRef(pSaved);
|
||||
//mnodeUpdateAllDbVgroups(pDb);
|
||||
mnodeDecDbRef(pDb);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -179,9 +188,14 @@ void mnodeDecDbRef(SDbObj *pDb) {
|
|||
|
||||
SDbObj *mnodeGetDbByTableId(char *tableId) {
|
||||
char db[TSDB_TABLE_ID_LEN], *pos;
|
||||
|
||||
|
||||
// tableId format should be : acct.db.table
|
||||
pos = strstr(tableId, TS_PATH_DELIMITER);
|
||||
assert(NULL != pos);
|
||||
|
||||
pos = strstr(pos + 1, TS_PATH_DELIMITER);
|
||||
assert(NULL != pos);
|
||||
|
||||
memset(db, 0, sizeof(db));
|
||||
strncpy(db, tableId, pos - tableId);
|
||||
|
||||
|
@ -379,7 +393,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
|
|||
|
||||
code = sdbInsertRow(&oper);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tfree(pDb);
|
||||
mnodeDestroyDb(pDb);
|
||||
mLInfo("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code));
|
||||
return code;
|
||||
} else {
|
||||
|
@ -416,45 +430,33 @@ void mnodePrintVgroups(SDbObj *pDb, char *oper) {
|
|||
void mnodeAddVgroupIntoDb(SVgObj *pVgroup) {
|
||||
SDbObj *pDb = pVgroup->pDb;
|
||||
|
||||
pVgroup->next = pDb->pHead;
|
||||
pVgroup->prev = NULL;
|
||||
pthread_mutex_lock(&pDb->mutex);
|
||||
int32_t vgPos = pDb->numOfVgroups++;
|
||||
if (vgPos >= pDb->vgListSize) {
|
||||
pDb->vgList = realloc(pDb->vgList, pDb->vgListSize * 2 * sizeof(SVgObj *));
|
||||
memset(pDb->vgList + pDb->vgListSize, 0, pDb->vgListSize * sizeof(SVgObj *));
|
||||
pDb->vgListSize *= 2;
|
||||
}
|
||||
|
||||
if (pDb->pHead) pDb->pHead->prev = pVgroup;
|
||||
if (pDb->pTail == NULL) pDb->pTail = pVgroup;
|
||||
|
||||
pDb->pHead = pVgroup;
|
||||
pDb->numOfVgroups++;
|
||||
}
|
||||
|
||||
void mnodeAddVgroupIntoDbTail(SVgObj *pVgroup) {
|
||||
SDbObj *pDb = pVgroup->pDb;
|
||||
pVgroup->next = NULL;
|
||||
pVgroup->prev = pDb->pTail;
|
||||
|
||||
if (pDb->pTail) pDb->pTail->next = pVgroup;
|
||||
if (pDb->pHead == NULL) pDb->pHead = pVgroup;
|
||||
|
||||
pDb->pTail = pVgroup;
|
||||
pDb->numOfVgroups++;
|
||||
pDb->vgList[vgPos] = pVgroup;
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
}
|
||||
|
||||
void mnodeRemoveVgroupFromDb(SVgObj *pVgroup) {
|
||||
SDbObj *pDb = pVgroup->pDb;
|
||||
if (pVgroup->prev) pVgroup->prev->next = pVgroup->next;
|
||||
if (pVgroup->next) pVgroup->next->prev = pVgroup->prev;
|
||||
if (pVgroup->prev == NULL) pDb->pHead = pVgroup->next;
|
||||
if (pVgroup->next == NULL) pDb->pTail = pVgroup->prev;
|
||||
pDb->numOfVgroups--;
|
||||
}
|
||||
|
||||
void mnodeMoveVgroupToTail(SVgObj *pVgroup) {
|
||||
mnodeRemoveVgroupFromDb(pVgroup);
|
||||
mnodeAddVgroupIntoDbTail(pVgroup);
|
||||
}
|
||||
pthread_mutex_lock(&pDb->mutex);
|
||||
for (int32_t v1 = 0; v1 < pDb->numOfVgroups; ++v1) {
|
||||
if (pDb->vgList[v1] == pVgroup) {
|
||||
for (int32_t v2 = v1; v2 < pDb->numOfVgroups - 1; ++v2) {
|
||||
pDb->vgList[v2] = pDb->vgList[v2 + 1];
|
||||
}
|
||||
pDb->numOfVgroups--;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void mnodeMoveVgroupToHead(SVgObj *pVgroup) {
|
||||
mnodeRemoveVgroupFromDb(pVgroup);
|
||||
mnodeAddVgroupIntoDb(pVgroup);
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
}
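(The hunk above replaces the per-db doubly linked vgroup list with a mutex-protected growable array; a standalone sketch of that pattern follows, with SVg, VgList, and the initial capacity of 8 as illustrative assumptions. Production code should also check the realloc result.)

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct SVg { int32_t vgId; } SVg;

    typedef struct {
      pthread_mutex_t mutex;
      int32_t         size;   /* allocated slots (vgListSize)   */
      int32_t         num;    /* used slots      (numOfVgroups) */
      SVg           **list;   /* vgList                         */
    } VgList;

    static void vgListAdd(VgList *vl, SVg *vg) {
      pthread_mutex_lock(&vl->mutex);
      if (vl->num >= vl->size) {  /* grow by doubling and zero the new half */
        vl->list = realloc(vl->list, vl->size * 2 * sizeof(SVg *));
        memset(vl->list + vl->size, 0, vl->size * sizeof(SVg *));
        vl->size *= 2;
      }
      vl->list[vl->num++] = vg;
      pthread_mutex_unlock(&vl->mutex);
    }

    static void vgListRemove(VgList *vl, SVg *vg) {
      pthread_mutex_lock(&vl->mutex);
      for (int32_t i = 0; i < vl->num; ++i) {
        if (vl->list[i] == vg) {  /* shift the tail down over the hole */
          memmove(vl->list + i, vl->list + i + 1, (vl->num - i - 1) * sizeof(SVg *));
          vl->num--;
          break;
        }
      }
      pthread_mutex_unlock(&vl->mutex);
    }

    int main(void) {
      VgList vl = { .size = 8, .num = 0, .list = NULL };
      pthread_mutex_init(&vl.mutex, NULL);
      vl.list = calloc(vl.size, sizeof(SVg *));
      SVg a = { .vgId = 1 };
      vgListAdd(&vl, &a);
      vgListRemove(&vl, &a);
      free(vl.list);
      pthread_mutex_destroy(&vl.mutex);
      return 0;
    }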
|
||||
|
||||
void mnodeCleanupDbs() {
|
||||
|
@ -526,11 +528,6 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
|
|||
#ifndef __CLOUD_VERSION__
|
||||
if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) {
|
||||
#endif
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
strcpy(pSchema[cols].name, "maxtables");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
|
@ -556,12 +553,6 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
|
|||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
strcpy(pSchema[cols].name, "ctime(Sec.)");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 1;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_TINYINT;
|
||||
strcpy(pSchema[cols].name, "wallevel");
|
||||
|
@ -671,10 +662,6 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
|
|||
#ifndef __CLOUD_VERSION__
|
||||
if (strcmp(pUser->user, TSDB_DEFAULT_USER) == 0) {
|
||||
#endif
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *)pWrite = pDb->cfg.maxTables; // the number of tables that can be created is this value minus 1
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *)pWrite = pDb->cfg.cacheBlockSize;
|
||||
cols++;
|
||||
|
@ -691,10 +678,6 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
|
|||
*(int32_t *)pWrite = pDb->cfg.maxRowsPerFileBlock;
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *)pWrite = pDb->cfg.commitTime;
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int8_t *)pWrite = pDb->cfg.walLevel;
|
||||
cols++;
|
||||
|
@ -965,6 +948,11 @@ static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg) {
|
|||
mError("db:%s, failed to alter, invalid db", pAlter->db);
|
||||
return TSDB_CODE_MND_INVALID_DB;
|
||||
}
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pAlter->db, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
return mnodeAlterDb(pMsg->pDb, pAlter, pMsg);
|
||||
}
|
||||
|
|
|
@ -69,6 +69,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
|
|||
SDnodeObj *pDnode = pOper->pObj;
|
||||
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
|
||||
pDnode->status = TAOS_DN_STATUS_OFFLINE;
|
||||
pDnode->lastAccess = tsAccessSquence;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -187,7 +188,27 @@ int32_t mnodeGetDnodesNum() {
|
|||
return sdbGetNumOfRows(tsDnodeSdb);
|
||||
}
|
||||
|
||||
int32_t mnodeGetOnlinDnodesNum(char *ep) {
|
||||
int32_t mnodeGetOnlinDnodesCpuCoreNum() {
|
||||
SDnodeObj *pDnode = NULL;
|
||||
void * pIter = NULL;
|
||||
int32_t cpuCores = 0;
|
||||
|
||||
while (1) {
|
||||
pIter = mnodeGetNextDnode(pIter, &pDnode);
|
||||
if (pDnode == NULL) break;
|
||||
if (pDnode->status != TAOS_DN_STATUS_OFFLINE) {
|
||||
cpuCores += pDnode->numOfCores;
|
||||
}
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
}
|
||||
|
||||
sdbFreeIter(pIter);
|
||||
|
||||
if (cpuCores < 2) cpuCores = 2;
|
||||
return cpuCores;
|
||||
}
|
||||
|
||||
int32_t mnodeGetOnlinDnodesNum() {
|
||||
SDnodeObj *pDnode = NULL;
|
||||
void * pIter = NULL;
|
||||
int32_t onlineDnodes = 0;
|
||||
|
@ -283,15 +304,17 @@ static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
|
|||
}
|
||||
|
||||
static bool mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
|
||||
if (clusterCfg->numOfMnodes != tsNumOfMnodes) return false;
|
||||
if (clusterCfg->mnodeEqualVnodeNum != tsMnodeEqualVnodeNum) return false;
|
||||
if (clusterCfg->offlineThreshold != tsOfflineThreshold) return false;
|
||||
if (clusterCfg->statusInterval != tsStatusInterval) return false;
|
||||
if (clusterCfg->numOfMnodes != htonl(tsNumOfMnodes)) return false;
|
||||
if (clusterCfg->mnodeEqualVnodeNum != htonl(tsMnodeEqualVnodeNum)) return false;
|
||||
if (clusterCfg->offlineThreshold != htonl(tsOfflineThreshold)) return false;
|
||||
if (clusterCfg->statusInterval != htonl(tsStatusInterval)) return false;
|
||||
if (clusterCfg->maxtablesPerVnode != htonl(tsMaxTablePerVnode)) return false;
|
||||
if (clusterCfg->maxVgroupsPerDb != htonl(tsMaxVgroupsPerDb)) return false;
|
||||
|
||||
if (0 != strncasecmp(clusterCfg->arbitrator, tsArbitrator, strlen(tsArbitrator))) return false;
|
||||
if (0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) return false;
|
||||
if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) return false;
|
||||
if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) return false;
|
||||
if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) return false;
|
||||
if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) return false;
|
||||
|
||||
return true;
|
||||
}
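(Both this check and the dnodeSendStatusMsg change earlier in the diff keep the integer cluster-config fields in network byte order; a minimal sketch of that convention, where MiniClusterCfg and its two fields are an illustrative subset rather than the real SClusterCfg.)

    #include <arpa/inet.h>   /* htonl, ntohl */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
      int32_t numOfMnodes;
      int32_t statusInterval;
    } MiniClusterCfg;

    /* sender side: put the fields on the wire in network byte order */
    static void encodeCfg(MiniClusterCfg *cfg, int32_t numOfMnodes, int32_t statusInterval) {
      cfg->numOfMnodes    = (int32_t)htonl((uint32_t)numOfMnodes);
      cfg->statusInterval = (int32_t)htonl((uint32_t)statusInterval);
    }

    /* receiver side: either compare against htonl()-ed local values, as the diff does,
     * or decode with ntohl() first -- both ends just have to agree */
    static int cfgMatches(const MiniClusterCfg *cfg, int32_t numOfMnodes, int32_t statusInterval) {
      return (int32_t)ntohl((uint32_t)cfg->numOfMnodes) == numOfMnodes &&
             (int32_t)ntohl((uint32_t)cfg->statusInterval) == statusInterval;
    }

    int main(void) {
      MiniClusterCfg cfg;
      encodeCfg(&cfg, 3, 1);
      printf("match: %d\n", cfgMatches(&cfg, 3, 1));  /* prints: match: 1 */
      return 0;
    }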
|
||||
|
@ -376,7 +399,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
if (false == ret) {
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
rpcFreeCont(pRsp);
|
||||
mError("dnode %s cluster cfg parameters inconsistent", pStatus->dnodeEp);
|
||||
mError("dnode:%d, %s cluster cfg parameters inconsistent", pDnode->dnodeId, pStatus->dnodeEp);
|
||||
return TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT;
|
||||
}
|
||||
|
||||
|
@ -468,18 +491,22 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
|
|||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) {
|
||||
mError("dnode:%d, can't drop dnode:%s which is master", pDnode->dnodeId, ep);
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
return TSDB_CODE_MND_NO_REMOVE_MASTER;
|
||||
}
|
||||
|
||||
mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
|
||||
|
||||
#ifndef _SYNC
|
||||
return mnodeDropDnode(pDnode, pMsg);
|
||||
int32_t code = mnodeDropDnode(pDnode, pMsg);
|
||||
#else
|
||||
return balanceDropDnode(pDnode);
|
||||
int32_t code = balanceDropDnode(pDnode);
|
||||
#endif
|
||||
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t mnodeProcessCreateDnodeMsg(SMnodeMsg *pMsg) {
|
||||
|
|
|
@ -41,7 +41,7 @@ typedef struct {
|
|||
void (*cleanup)();
|
||||
} SMnodeComponent;
|
||||
|
||||
void *tsMnodeTmr;
|
||||
void *tsMnodeTmr = NULL;
|
||||
static bool tsMgmtIsRunning = false;
|
||||
|
||||
static const SMnodeComponent tsMnodeComponents[] = {
|
||||
|
|
|
@ -199,7 +199,7 @@ static void sdbRestoreTables() {
|
|||
sdbDebug("table:%s, is restored, numOfRows:%" PRId64, pTable->tableName, pTable->numOfRows);
|
||||
}
|
||||
|
||||
sdbInfo("sdb is restored, version:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables);
|
||||
sdbInfo("sdb is restored, ver:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables);
|
||||
}
|
||||
|
||||
void sdbUpdateMnodeRoles() {
|
||||
|
@ -264,8 +264,12 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
|
|||
if (pOper->cb != NULL) {
|
||||
pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode);
|
||||
}
|
||||
|
||||
dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode);
|
||||
|
||||
// if ahandle, means this func is called by sdb write
|
||||
if (ahandle == NULL) {
|
||||
sdbDecRef(pOper->table, pOper->pObj);
|
||||
}
|
||||
taosFreeQitem(pOper);
|
||||
}
|
||||
|
||||
|
@ -373,7 +377,7 @@ void sdbCleanUp() {
|
|||
tsSdbObj.status = SDB_STATUS_CLOSING;
|
||||
|
||||
sdbCleanupWriteWorker();
|
||||
sdbDebug("sdb will be closed, version:%" PRId64, tsSdbObj.version);
|
||||
sdbDebug("sdb will be closed, ver:%" PRId64, tsSdbObj.version);
|
||||
|
||||
if (tsSdbObj.sync) {
|
||||
syncStop(tsSdbObj.sync);
|
||||
|
@ -471,8 +475,8 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
atomic_add_fetch_32(&pTable->autoIndex, 1);
|
||||
}
|
||||
|
||||
sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion());
|
||||
sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, pOper->pMsg);
|
||||
|
||||
(*pTable->insertFp)(pOper);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -490,8 +494,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
taosHashRemove(pTable->iHandle, key, keySize);
|
||||
atomic_sub_fetch_32(&pTable->numOfRows, 1);
|
||||
|
||||
sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
|
||||
sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg);
|
||||
|
||||
int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
|
||||
*updateEnd = 1;
|
||||
|
@ -501,8 +505,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
}
|
||||
|
||||
static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) {
|
||||
sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
|
||||
sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg);
|
||||
|
||||
(*pTable->updateFp)(pOper);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -527,12 +531,12 @@ static int sdbWrite(void *param, void *data, int type) {
|
|||
// for data from WAL or forward, version may be smaller
|
||||
if (pHead->version <= tsSdbObj.version) {
|
||||
pthread_mutex_unlock(&tsSdbObj.mutex);
|
||||
sdbDebug("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
|
||||
sdbDebug("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64,
|
||||
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (pHead->version != tsSdbObj.version + 1) {
|
||||
pthread_mutex_unlock(&tsSdbObj.mutex);
|
||||
sdbError("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
|
||||
sdbError("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64,
|
||||
pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
|
||||
return TSDB_CODE_MND_APP_ERROR;
|
||||
} else {
|
||||
|
@ -556,19 +560,19 @@ static int sdbWrite(void *param, void *data, int type) {
|
|||
if (syncCode <= 0) pOper->processedCount = 1;
|
||||
|
||||
if (syncCode < 0) {
|
||||
sdbError("table:%s, failed to forward request, result:%s action:%s record:%s version:%" PRId64, pTable->tableName,
|
||||
tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
|
||||
sdbError("table:%s, failed to forward request, result:%s action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg);
|
||||
} else if (syncCode > 0) {
|
||||
sdbDebug("table:%s, forward request is sent, action:%s record:%s version:%" PRId64, pTable->tableName,
|
||||
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
|
||||
sdbDebug("table:%s, forward request is sent, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg);
|
||||
} else {
|
||||
sdbTrace("table:%s, no need to send fwd request, action:%s record:%s version:%" PRId64, pTable->tableName,
|
||||
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
|
||||
sdbTrace("table:%s, no need to send fwd request, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg);
|
||||
}
|
||||
return syncCode;
|
||||
}
|
||||
|
||||
sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s version:%" PRId64, pTable->tableName,
|
||||
sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s ver:%" PRId64, pTable->tableName,
|
||||
sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
|
||||
|
||||
// even it is WAL/FWD, it shall be called to update version in sync
|
||||
|
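The change above tightens the version gate that sdbWrite applies to every record replayed from the WAL or received as a forward. A minimal stand-alone sketch of that three-way decision (illustrative only; the helper and enum names are hypothetical, the real code returns TSDB_CODE_* values and logs the skipped record):

#include <stdint.h>

/* Hypothetical helper, not part of the commit. */
enum { SDB_APPLY = 0, SDB_SKIP = 1, SDB_VER_ERROR = -1 };

static int sdbCheckIncomingVersion(int64_t sdbVersion, int64_t recordVersion) {
  if (recordVersion <= sdbVersion) {
    return SDB_SKIP;       /* already applied, silently ignore the record      */
  } else if (recordVersion != sdbVersion + 1) {
    return SDB_VER_ERROR;  /* a gap in the version sequence is treated as fatal */
  } else {
    return SDB_APPLY;      /* exactly the next version, apply it               */
  }
}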
@ -581,16 +585,26 @@ static int sdbWrite(void *param, void *data, int type) {
|
|||
return sdbInsertHash(pTable, &oper);
|
||||
} else if (action == SDB_ACTION_DELETE) {
|
||||
SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont);
|
||||
assert(rowMeta != NULL && rowMeta->row != NULL);
|
||||
if (rowMeta == NULL || rowMeta->row == NULL) {
|
||||
sdbError("table:%s, failed to get object:%s from wal while dispose delete action", pTable->tableName,
|
||||
pHead->cont);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSdbOper oper = {.table = pTable, .pObj = rowMeta->row};
|
||||
return sdbDeleteHash(pTable, &oper);
|
||||
} else if (action == SDB_ACTION_UPDATE) {
|
||||
SSdbRow *rowMeta = sdbGetRowMeta(pTable, pHead->cont);
|
||||
assert(rowMeta != NULL && rowMeta->row != NULL);
|
||||
if (rowMeta == NULL || rowMeta->row == NULL) {
|
||||
sdbError("table:%s, failed to get object:%s from wal while dispose update action", pTable->tableName,
|
||||
pHead->cont);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
|
||||
code = (*pTable->decodeFp)(&oper);
|
||||
return sdbUpdateHash(pTable, &oper);
|
||||
} else { return TSDB_CODE_MND_INVALID_MSG_TYPE; }
|
||||
} else {
|
||||
return TSDB_CODE_MND_INVALID_MSG_TYPE;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t sdbInsertRow(SSdbOper *pOper) {
|
||||
|
@ -663,14 +677,18 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
|
|||
return TSDB_CODE_MND_SDB_INVAID_META_ROW;
|
||||
}
|
||||
|
||||
sdbIncRef(pTable, pOper->pObj);
|
||||
|
||||
int32_t code = sdbDeleteHash(pTable, pOper);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
sdbError("table:%s, failed to delete from hash", pTable->tableName);
|
||||
sdbDecRef(pTable, pOper->pObj);
|
||||
return code;
|
||||
}
|
||||
|
||||
// just delete data from memory
|
||||
if (pOper->type != SDB_OPER_GLOBAL) {
|
||||
sdbDecRef(pTable, pOper->pObj);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -692,7 +710,6 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
|
|||
pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
|
||||
}
|
||||
|
||||
sdbIncRef(pNewOper->table, pNewOper->pObj);
|
||||
taosWriteQitem(tsSdbWriteQueue, TAOS_QTYPE_RPC, pNewOper);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -957,7 +974,7 @@ static void *sdbWorkerFp(void *param) {
|
|||
pOper->processedCount = 1;
|
||||
pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK;
|
||||
if (pOper->pMsg != NULL) {
|
||||
sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
|
||||
sdbDebug("app:%p:%p, table:%s record:%p:%s ver:%" PRIu64 ", will be processed in sdb queue",
|
||||
pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
|
||||
sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
|
||||
}
|
||||
|
@ -967,7 +984,12 @@ static void *sdbWorkerFp(void *param) {
|
|||
}
|
||||
|
||||
int32_t code = sdbWrite(pOper, pHead, type);
|
||||
if (pOper && code <= 0) pOper->retCode = code;
|
||||
if (code > 0) code = 0;
|
||||
if (pOper) {
|
||||
pOper->retCode = code;
|
||||
} else {
|
||||
pHead->len = code; // hackway
|
||||
}
|
||||
}
|
||||
|
||||
walFsync(tsSdbObj.wal);
|
||||
|
@ -979,10 +1001,10 @@ static void *sdbWorkerFp(void *param) {
|
|||
|
||||
if (type == TAOS_QTYPE_RPC) {
|
||||
pOper = (SSdbOper *)item;
|
||||
sdbDecRef(pOper->table, pOper->pObj);
|
||||
sdbConfirmForward(NULL, pOper, pOper->retCode);
|
||||
} else if (type == TAOS_QTYPE_FWD) {
|
||||
syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
|
||||
pHead = (SWalHead *)item;
|
||||
syncConfirmForward(tsSdbObj.sync, pHead->version, pHead->len);
|
||||
taosFreeQitem(item);
|
||||
} else {
|
||||
taosFreeQitem(item);
|
||||
|
|
|
@ -307,6 +307,12 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
|
|||
code = TSDB_CODE_MND_INVALID_DB;
|
||||
goto connect_over;
|
||||
}
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
code = TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
goto connect_over;
|
||||
}
|
||||
mnodeDecDbRef(pDb);
|
||||
}
|
||||
|
||||
|
@ -350,7 +356,12 @@ static int32_t mnodeProcessUseMsg(SMnodeMsg *pMsg) {
|
|||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pUseDbMsg->db);
|
||||
if (pMsg->pDb == NULL) {
|
||||
code = TSDB_CODE_MND_INVALID_DB;
|
||||
return TSDB_CODE_MND_INVALID_DB;
|
||||
}
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -403,4 +414,4 @@ void mnodeVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capa
|
|||
memmove(data + pShow->offset[i] * rows, data + pShow->offset[i] * capacity, pShow->bytes[i] * rows);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -116,6 +116,11 @@ static int32_t mnodeChildTableActionInsert(SSdbOper *pOper) {
|
|||
mError("ctable:%s, vgId:%d not in db:%s", pTable->info.tableId, pVgroup->vgId, pVgroup->dbName);
|
||||
return TSDB_CODE_MND_INVALID_DB;
|
||||
}
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
mnodeDecDbRef(pDb);
|
||||
|
||||
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
|
||||
|
@ -284,8 +289,8 @@ static int32_t mnodeChildTableActionRestored() {
|
|||
if (pTable == NULL) break;
|
||||
|
||||
SDbObj *pDb = mnodeGetDbByTableId(pTable->info.tableId);
|
||||
if (pDb == NULL) {
|
||||
mError("ctable:%s, failed to get db, discard it", pTable->info.tableId);
|
||||
if (pDb == NULL || pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("ctable:%s, failed to get db or db in dropping, discard it", pTable->info.tableId);
|
||||
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
|
||||
sdbDeleteRow(&desc);
|
||||
mnodeDecTableRef(pTable);
|
||||
|
@ -314,15 +319,6 @@ static int32_t mnodeChildTableActionRestored() {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (pVgroup->tableList == NULL) {
|
||||
mError("ctable:%s, vgId:%d tableList is null", pTable->info.tableId, pTable->vgId);
|
||||
pTable->vgId = 0;
|
||||
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
|
||||
sdbDeleteRow(&desc);
|
||||
mnodeDecTableRef(pTable);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE) {
|
||||
SSuperTableObj *pSuperTable = mnodeGetSuperTableByUid(pTable->suid);
|
||||
if (pSuperTable == NULL) {
|
||||
|
@ -423,7 +419,7 @@ static int32_t mnodeSuperTableActionDestroy(SSdbOper *pOper) {
|
|||
static int32_t mnodeSuperTableActionInsert(SSdbOper *pOper) {
|
||||
SSuperTableObj *pStable = pOper->pObj;
|
||||
SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId);
|
||||
if (pDb != NULL) {
|
||||
if (pDb != NULL && pDb->status == TSDB_DB_STATUS_READY) {
|
||||
mnodeAddSuperTableIntoDb(pDb);
|
||||
}
|
||||
mnodeDecDbRef(pDb);
|
||||
|
@ -685,10 +681,15 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
|
|||
SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont;
|
||||
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreate->db);
|
||||
if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("app:%p:%p, table:%s, failed to create, db not selected", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId);
|
||||
if (pMsg->pTable != NULL && pMsg->retry == 0) {
|
||||
|
@ -719,10 +720,15 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
|
|||
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
|
||||
SCMDropTableMsg *pDrop = pMsg->rpcMsg.pCont;
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableId);
|
||||
if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("app:%p:%p, table:%s, failed to drop table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId);
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("app:%p:%p, table:%s, failed to drop table, db not selected or db in dropping", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
|
||||
mError("app:%p:%p, table:%s, failed to drop table, in monitor database", pMsg->rpcMsg.ahandle, pMsg,
|
||||
|
@ -757,11 +763,16 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
|
|||
pInfo->tableId, pMsg->rpcMsg.handle, pInfo->createFlag);
|
||||
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableId);
|
||||
if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("app:%p:%p, table:%s, failed to get table meta, db not selected", pMsg->rpcMsg.ahandle, pMsg,
|
||||
pInfo->tableId);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pInfo->tableId);
|
||||
if (pMsg->pTable == NULL) {
|
||||
|
@ -1209,6 +1220,11 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) {
|
|||
static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
int32_t cols = 0;
|
||||
SSchema *pSchema = pMeta->schema;
|
||||
|
@ -1268,6 +1284,11 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
|
|||
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return 0;
|
||||
}
|
||||
|
||||
tstrncpy(prefix, pDb->name, 64);
|
||||
strcat(prefix, TS_PATH_DELIMITER);
|
||||
|
@ -1570,8 +1591,8 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
assert(pTable);
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
|
||||
pTable->sid, pTable->uid);
|
||||
mDebug("app:%p:%p, table:%s, created in mnode, vgId:%d sid:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle,
|
||||
pMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid, tstrerror(code));
|
||||
} else {
|
||||
mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
|
||||
pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
|
||||
|
@ -1700,19 +1721,15 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
|
|||
return code;
|
||||
}
|
||||
|
||||
SVgObj *pVgroup = mnodeGetAvailableVgroup(pMsg->pDb);
|
||||
if (pVgroup == NULL) {
|
||||
mDebug("app:%p:%p, table:%s, start to create a new vgroup", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId);
|
||||
return mnodeCreateVgroup(pMsg, pMsg->pDb);
|
||||
}
|
||||
|
||||
if (pMsg->retry == 0) {
|
||||
if (pMsg->pTable == NULL) {
|
||||
int32_t sid = taosAllocateId(pVgroup->idPool);
|
||||
if (sid <= 0) {
|
||||
mDebug("app:%p:%p, table:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId,
|
||||
pVgroup->vgId);
|
||||
return mnodeCreateVgroup(pMsg, pMsg->pDb);
|
||||
SVgObj *pVgroup = NULL;
|
||||
int32_t sid = 0;
|
||||
code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &sid);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mDebug("app:%p:%p, table:%s, failed to get available vgroup, reason:%s", pMsg->rpcMsg.ahandle, pMsg,
|
||||
pCreate->tableId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
if (pMsg->pVgroup == NULL) {
|
||||
|
@ -1720,7 +1737,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
|
|||
mnodeIncVgroupRef(pVgroup);
|
||||
}
|
||||
|
||||
mDebug("app:%p:%p, table:%s, create table in vgroup, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId,
|
||||
mDebug("app:%p:%p, table:%s, allocated in vgroup, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId,
|
||||
pVgroup->vgId, sid);
|
||||
|
||||
return mnodeDoCreateChildTable(pMsg, sid);
|
||||
|
@ -2119,6 +2136,7 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) {
|
|||
mInfo("stable:%s, all child tables:%d is dropped from sdb", pStable->info.tableId, numOfTables);
|
||||
}
|
||||
|
||||
#if 0
|
||||
static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
|
||||
SVgObj *pVgroup = mnodeGetVgroup(vnode);
|
||||
if (pVgroup == NULL) return NULL;
|
||||
|
@ -2129,8 +2147,11 @@ static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) {
|
|||
mnodeDecVgroupRef(pVgroup);
|
||||
return pTable;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
|
||||
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
||||
#if 0
|
||||
SDMConfigTableMsg *pCfg = pMsg->rpcMsg.pCont;
|
||||
pCfg->dnodeId = htonl(pCfg->dnodeId);
|
||||
pCfg->vgId = htonl(pCfg->vgId);
|
||||
|
@ -2154,6 +2175,7 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
|
|||
pMsg->rpcRsp.rsp = pCreate;
|
||||
pMsg->rpcRsp.len = htonl(pCreate->contLen);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
#endif
|
||||
}
|
||||
|
||||
// handle drop child response
|
||||
|
@ -2299,7 +2321,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
|
|||
if (pTable == NULL) continue;
|
||||
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(tableId);
|
||||
if (pMsg->pDb == NULL) {
|
||||
if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mnodeDecTableRef(pTable);
|
||||
continue;
|
||||
}
|
||||
|
@ -2337,6 +2359,11 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
|
|||
static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
int32_t cols = 0;
|
||||
SSchema *pSchema = pMeta->schema;
|
||||
|
@ -2385,6 +2412,11 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
|
|||
static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t numOfRows = 0;
|
||||
SChildTableObj *pTable = NULL;
|
||||
|
@ -2476,10 +2508,15 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
pAlter->tableId, pMsg->rpcMsg.handle);
|
||||
|
||||
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableId);
|
||||
if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
if (pMsg->pDb == NULL) {
|
||||
mError("app:%p:%p, table:%s, failed to alter table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId);
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
|
||||
mError("app:%p:%p, table:%s, failed to alter table, its log db", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId);
|
||||
|
@ -2539,6 +2576,11 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
int32_t cols = 0;
|
||||
SSchema *pSchema = pMeta->schema;
|
||||
|
@ -2586,7 +2628,11 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
|
|||
static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t numOfRows = 0;
|
||||
SChildTableObj *pTable = NULL;
|
||||
|
|
|
@ -46,6 +46,7 @@ typedef enum {
|
|||
static void *tsVgroupSdb = NULL;
|
||||
static int32_t tsVgUpdateSize = 0;
|
||||
|
||||
static int32_t mnodeAllocVgroupIdPool(SVgObj *pInputVgroup);
|
||||
static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
|
||||
|
@ -53,17 +54,17 @@ static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg);
|
|||
static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ;
|
||||
static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
|
||||
|
||||
static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) {
|
||||
SVgObj *pVgroup = pOper->pObj;
|
||||
static void mnodeDestroyVgroup(SVgObj *pVgroup) {
|
||||
if (pVgroup->idPool) {
|
||||
taosIdPoolCleanUp(pVgroup->idPool);
|
||||
pVgroup->idPool = NULL;
|
||||
}
|
||||
if (pVgroup->tableList) {
|
||||
tfree(pVgroup->tableList);
|
||||
}
|
||||
|
||||
tfree(pOper->pObj);
|
||||
tfree(pVgroup);
|
||||
}
|
||||
|
||||
static int32_t mnodeVgroupActionDestroy(SSdbOper *pOper) {
|
||||
mnodeDestroyVgroup(pOper->pObj);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -75,23 +76,16 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) {
|
|||
if (pDb == NULL) {
|
||||
return TSDB_CODE_MND_INVALID_DB;
|
||||
}
|
||||
|
||||
pVgroup->pDb = pDb;
|
||||
pVgroup->prev = NULL;
|
||||
pVgroup->next = NULL;
|
||||
pVgroup->accessState = TSDB_VN_ALL_ACCCESS;
|
||||
|
||||
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
|
||||
pVgroup->tableList = calloc(pDb->cfg.maxTables, sizeof(SChildTableObj *));
|
||||
if (pVgroup->tableList == NULL) {
|
||||
mError("vgId:%d, failed to malloc(size:%d) for the tableList of vgroups", pVgroup->vgId, size);
|
||||
return -1;
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
pVgroup->idPool = taosInitIdPool(pDb->cfg.maxTables);
|
||||
if (pVgroup->idPool == NULL) {
|
||||
mError("vgId:%d, failed to taosInitIdPool for vgroups", pVgroup->vgId);
|
||||
tfree(pVgroup->tableList);
|
||||
pVgroup->pDb = pDb;
|
||||
pVgroup->accessState = TSDB_VN_ALL_ACCCESS;
|
||||
if (mnodeAllocVgroupIdPool(pVgroup) < 0) {
|
||||
mError("vgId:%d, failed to init idpool for vgroups", pVgroup->vgId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -129,20 +123,6 @@ static int32_t mnodeVgroupActionDelete(SSdbOper *pOper) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void mnodeVgroupUpdateIdPool(SVgObj *pVgroup) {
|
||||
int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
|
||||
SDbObj *pDb = pVgroup->pDb;
|
||||
if (pDb != NULL) {
|
||||
if (pDb->cfg.maxTables != oldTables) {
|
||||
mInfo("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
|
||||
taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
|
||||
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
|
||||
pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
|
||||
memset(pVgroup->tableList + oldTables, 0, (pDb->cfg.maxTables - oldTables) * sizeof(SChildTableObj *));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
|
||||
SVgObj *pNew = pOper->pObj;
|
||||
SVgObj *pVgroup = mnodeGetVgroup(pNew->vgId);
|
||||
|
@ -169,7 +149,6 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
|
|||
free(pNew);
|
||||
}
|
||||
|
||||
mnodeVgroupUpdateIdPool(pVgroup);
|
||||
|
||||
// reset vgid status on vgroup changed
|
||||
mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId);
|
||||
|
@ -339,8 +318,132 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
|
|||
}
|
||||
}
|
||||
|
||||
SVgObj *mnodeGetAvailableVgroup(SDbObj *pDb) {
|
||||
return pDb->pHead;
|
||||
static int32_t mnodeAllocVgroupIdPool(SVgObj *pInputVgroup) {
|
||||
SDbObj *pDb = pInputVgroup->pDb;
|
||||
if (pDb == NULL) return TSDB_CODE_MND_APP_ERROR;
|
||||
|
||||
int32_t minIdPoolSize = TSDB_MAX_TABLES;
|
||||
int32_t maxIdPoolSize = TSDB_MIN_TABLES;
|
||||
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
|
||||
SVgObj *pVgroup = pDb->vgList[v];
|
||||
if (pVgroup == NULL) continue;
|
||||
|
||||
int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
|
||||
minIdPoolSize = MIN(minIdPoolSize, idPoolSize);
|
||||
maxIdPoolSize = MAX(maxIdPoolSize, idPoolSize);
|
||||
}
|
||||
|
||||
// new vgroup
|
||||
if (pInputVgroup->idPool == NULL) {
|
||||
pInputVgroup->idPool = taosInitIdPool(maxIdPoolSize);
|
||||
if (pInputVgroup->idPool == NULL) {
|
||||
mError("vgId:%d, failed to init idPool for vgroup, size:%d", pInputVgroup->vgId, maxIdPoolSize);
|
||||
return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
} else {
|
||||
mDebug("vgId:%d, init idPool for vgroup, size:%d", pInputVgroup->vgId, maxIdPoolSize);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
// realloc all vgroups in db
|
||||
int32_t newIdPoolSize;
|
||||
if (minIdPoolSize * 4 < TSDB_TABLES_STEP) {
|
||||
newIdPoolSize = minIdPoolSize * 4;
|
||||
} else {
|
||||
newIdPoolSize = ((minIdPoolSize / TSDB_TABLES_STEP) + 1) * TSDB_TABLES_STEP;
|
||||
}
|
||||
|
||||
if (newIdPoolSize > tsMaxTablePerVnode) {
|
||||
if (minIdPoolSize >= tsMaxTablePerVnode) {
|
||||
mError("db:%s, minIdPoolSize:%d newIdPoolSize:%d larger than maxTablesPerVnode:%d", pDb->name, minIdPoolSize, newIdPoolSize,
|
||||
tsMaxTablePerVnode);
|
||||
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
} else {
|
||||
newIdPoolSize = tsMaxTablePerVnode;
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
|
||||
SVgObj *pVgroup = pDb->vgList[v];
|
||||
if (pVgroup == NULL) continue;
|
||||
|
||||
int32_t oldIdPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
|
||||
if (newIdPoolSize == oldIdPoolSize) continue;
|
||||
|
||||
if (taosUpdateIdPool(pVgroup->idPool, newIdPoolSize) < 0) {
|
||||
mError("vgId:%d, failed to update idPoolSize from %d to %d", pVgroup->vgId, oldIdPoolSize, newIdPoolSize);
|
||||
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
} else {
|
||||
mDebug("vgId:%d, idPoolSize update from %d to %d", pVgroup->vgId, oldIdPoolSize, newIdPoolSize);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSid) {
|
||||
SDbObj *pDb = pMsg->pDb;
|
||||
pthread_mutex_lock(&pDb->mutex);
|
||||
|
||||
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
|
||||
int vgIndex = (v + pDb->vgListIndex) % pDb->numOfVgroups;
|
||||
SVgObj *pVgroup = pDb->vgList[vgIndex];
|
||||
if (pVgroup == NULL) {
|
||||
mError("db:%s, index:%d vgroup is null", pDb->name, vgIndex);
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
return TSDB_CODE_MND_APP_ERROR;
|
||||
}
|
||||
|
||||
int32_t sid = taosAllocateId(pVgroup->idPool);
|
||||
if (sid <= 0) {
|
||||
mDebug("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId);
|
||||
continue;
|
||||
}
|
||||
|
||||
*pSid = sid;
|
||||
*ppVgroup = pVgroup;
|
||||
pDb->vgListIndex = vgIndex;
|
||||
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int maxVgroupsPerDb = tsMaxVgroupsPerDb;
|
||||
if (maxVgroupsPerDb <= 0) {
|
||||
maxVgroupsPerDb = mnodeGetOnlinDnodesCpuCoreNum();
|
||||
maxVgroupsPerDb = MAX(maxVgroupsPerDb, 2);
|
||||
}
|
||||
|
||||
if (pDb->numOfVgroups < maxVgroupsPerDb) {
|
||||
mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle, pMsg,
|
||||
pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
int32_t code = mnodeCreateVgroup(pMsg);
|
||||
if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return code;
|
||||
}
|
||||
|
||||
SVgObj *pVgroup = pDb->vgList[0];
|
||||
if (pVgroup == NULL) return TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
|
||||
int32_t code = mnodeAllocVgroupIdPool(pVgroup);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t sid = taosAllocateId(pVgroup->idPool);
|
||||
if (sid <= 0) {
|
||||
mError("app:%p:%p, db:%s, no enough sid in vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pDb->name, pVgroup->vgId);
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
}
|
||||
|
||||
*pSid = sid;
|
||||
*ppVgroup = pVgroup;
|
||||
pDb->vgListIndex = 0;
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
|
||||
|
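The heart of this hunk is the id-pool growth rule: when every vgroup of a database runs out of table ids, all pools in the db are resized together. A minimal sketch of just the size calculation follows, assuming placeholder parameters in place of TSDB_TABLES_STEP and tsMaxTablePerVnode and leaving out the already-at-limit error path that the real mnodeAllocVgroupIdPool reports.

#include <stdint.h>

/* Sketch only, not part of the commit. */
static int32_t calcNewIdPoolSize(int32_t minIdPoolSize, int32_t tablesStep, int32_t maxTablePerVnode) {
  int32_t newIdPoolSize;
  if (minIdPoolSize * 4 < tablesStep) {
    newIdPoolSize = minIdPoolSize * 4;                                /* small pools grow geometrically   */
  } else {
    newIdPoolSize = ((minIdPoolSize / tablesStep) + 1) * tablesStep;  /* large pools grow by one step     */
  }
  if (newIdPoolSize > maxTablePerVnode) {
    newIdPoolSize = maxTablePerVnode;                                 /* never exceed the per-vnode limit */
  }
  return newIdPoolSize;
}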
@ -367,15 +470,15 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
pVgroup->vnodeGid[i].dnodeId);
|
||||
}
|
||||
|
||||
mnodeIncVgroupRef(pVgroup);
|
||||
pMsg->expected = pVgroup->numOfVnodes;
|
||||
mnodeSendCreateVgroupMsg(pVgroup, pMsg);
|
||||
|
||||
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
|
||||
}
|
||||
|
||||
int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
|
||||
int32_t mnodeCreateVgroup(SMnodeMsg *pMsg) {
|
||||
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
|
||||
SDbObj *pDb = pMsg->pDb;
|
||||
|
||||
SVgObj *pVgroup = (SVgObj *)calloc(1, sizeof(SVgObj));
|
||||
tstrncpy(pVgroup->dbName, pDb->name, TSDB_ACCT_LEN + TSDB_DB_NAME_LEN);
|
||||
|
@ -388,6 +491,9 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
|
|||
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
|
||||
}
|
||||
|
||||
pMsg->pVgroup = pVgroup;
|
||||
mnodeIncVgroupRef(pVgroup);
|
||||
|
||||
SSdbOper oper = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.table = tsVgroupSdb,
|
||||
|
@ -397,12 +503,10 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
|
|||
.cb = mnodeCreateVgroupCb
|
||||
};
|
||||
|
||||
pMsg->pVgroup = pVgroup;
|
||||
|
||||
int32_t code = sdbInsertRow(&oper);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pMsg->pVgroup = NULL;
|
||||
tfree(pVgroup);
|
||||
mnodeDestroyVgroup(pVgroup);
|
||||
} else {
|
||||
code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
|
||||
}
|
||||
|
@ -435,6 +539,11 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
|||
if (pDb == NULL) {
|
||||
return TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
}
|
||||
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return TSDB_CODE_MND_DB_IN_DROPPING;
|
||||
}
|
||||
|
||||
int32_t cols = 0;
|
||||
SSchema *pSchema = pMeta->schema;
|
||||
|
@ -451,29 +560,27 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
|||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
int32_t maxReplica = 0;
|
||||
SVgObj *pVgroup = NULL;
|
||||
STableObj *pTable = NULL;
|
||||
if (pShow->payloadLen > 0 ) {
|
||||
pTable = mnodeGetTable(pShow->payload);
|
||||
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
|
||||
mnodeDecTableRef(pTable);
|
||||
return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
||||
}
|
||||
mnodeDecTableRef(pTable);
|
||||
pVgroup = mnodeGetVgroup(((SChildTableObj*)pTable)->vgId);
|
||||
if (NULL == pVgroup) return TSDB_CODE_MND_INVALID_TABLE_NAME;
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
|
||||
} else {
|
||||
SVgObj *pVgroup = pDb->pHead;
|
||||
while (pVgroup != NULL) {
|
||||
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
|
||||
pVgroup = pVgroup->next;
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
strcpy(pSchema[cols].name, "poolSize");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
strcpy(pSchema[cols].name, "maxTables");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->maxReplica = 1;
|
||||
for (int32_t v = 0; v < pDb->numOfVgroups; ++v) {
|
||||
SVgObj *pVgroup = pDb->vgList[v];
|
||||
if (pVgroup != NULL) {
|
||||
pShow->maxReplica = pVgroup->numOfVnodes > pShow->maxReplica ? pVgroup->numOfVnodes : pShow->maxReplica;
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < maxReplica; ++i) {
|
||||
for (int32_t i = 0; i < pShow->maxReplica; ++i) {
|
||||
pShow->bytes[cols] = 2;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
|
||||
strcpy(pSchema[cols].name, "dnode");
|
||||
|
@ -497,43 +604,54 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
|||
pShow->numOfColumns = cols;
|
||||
|
||||
pShow->offset[0] = 0;
|
||||
for (int32_t i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
|
||||
|
||||
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
|
||||
|
||||
if (NULL == pTable) {
|
||||
pShow->numOfRows = pDb->numOfVgroups;
|
||||
pShow->pIter = pDb->pHead;
|
||||
} else {
|
||||
pShow->numOfRows = 1;
|
||||
pShow->pIter = pVgroup;
|
||||
for (int32_t i = 1; i < cols; ++i) {
|
||||
pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
|
||||
}
|
||||
|
||||
mnodeDecDbRef(pDb);
|
||||
pShow->numOfRows = pDb->numOfVgroups;
|
||||
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
|
||||
|
||||
mnodeDecDbRef(pDb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) {
|
||||
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
|
||||
return true;
|
||||
}
|
||||
|
||||
SChildTableObj *pCTable = (SChildTableObj *)pTable;
|
||||
if (pVgroup->vgId == pCTable->vgId) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
int32_t numOfRows = 0;
|
||||
SVgObj *pVgroup = NULL;
|
||||
int32_t maxReplica = 0;
|
||||
int32_t cols = 0;
|
||||
char * pWrite;
|
||||
|
||||
SDbObj *pDb = mnodeGetDb(pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
|
||||
pVgroup = pDb->pHead;
|
||||
while (pVgroup != NULL) {
|
||||
maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
|
||||
pVgroup = pVgroup->next;
|
||||
if (pDb->status != TSDB_DB_STATUS_READY) {
|
||||
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
|
||||
return 0;
|
||||
}
|
||||
|
||||
STableObj *pTable = NULL;
|
||||
if (pShow->payloadLen > 0 ) {
|
||||
pTable = mnodeGetTable(pShow->payload);
|
||||
}
|
||||
|
||||
while (numOfRows < rows) {
|
||||
pVgroup = (SVgObj *) pShow->pIter;
|
||||
pShow->pIter = mnodeGetNextVgroup(pShow->pIter, &pVgroup);
|
||||
if (pVgroup == NULL) break;
|
||||
pShow->pIter = (void *) pVgroup->next;
|
||||
if (pVgroup->pDb != pDb) continue;
|
||||
if (!mnodeFilterVgroups(pVgroup, pTable)) continue;
|
||||
|
||||
cols = 0;
|
||||
|
||||
|
@ -545,7 +663,15 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
|
|||
*(int32_t *) pWrite = pVgroup->numOfTables;
|
||||
cols++;
|
||||
|
||||
for (int32_t i = 0; i < maxReplica; ++i) {
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *)pWrite = taosIdPoolMaxSize(pVgroup->idPool);
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *)pWrite = tsMaxTablePerVnode;
|
||||
cols++;
|
||||
|
||||
for (int32_t i = 0; i < pShow->maxReplica; ++i) {
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
|
||||
cols++;
|
||||
|
@ -573,38 +699,36 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC
|
|||
}
|
||||
}
|
||||
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
numOfRows++;
|
||||
}
|
||||
|
||||
pShow->numOfReads += numOfRows;
|
||||
mnodeDecTableRef(pTable);
|
||||
mnodeDecDbRef(pDb);
|
||||
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
|
||||
if (pTable->sid >= 1 && pVgroup->tableList[pTable->sid - 1] == NULL) {
|
||||
pVgroup->tableList[pTable->sid - 1] = pTable;
|
||||
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
|
||||
pVgroup->numOfTables++;
|
||||
}
|
||||
|
||||
if (pVgroup->numOfTables >= pVgroup->pDb->cfg.maxTables) {
|
||||
mnodeMoveVgroupToTail(pVgroup);
|
||||
int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool);
|
||||
if (pTable->sid > idPoolSize) {
|
||||
mnodeAllocVgroupIdPool(pVgroup);
|
||||
}
|
||||
|
||||
mnodeIncVgroupRef(pVgroup);
|
||||
if (pTable->sid >= 1) {
|
||||
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
|
||||
pVgroup->numOfTables++;
|
||||
mnodeIncVgroupRef(pVgroup);
|
||||
}
|
||||
}
|
||||
|
||||
void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
|
||||
if (pTable->sid >= 1 && pVgroup->tableList[pTable->sid - 1] != NULL) {
|
||||
pVgroup->tableList[pTable->sid - 1] = NULL;
|
||||
if (pTable->sid >= 1) {
|
||||
taosFreeId(pVgroup->idPool, pTable->sid);
|
||||
pVgroup->numOfTables--;
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
}
|
||||
|
||||
mnodeMoveVgroupToHead(pVgroup);
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
}
|
||||
|
||||
SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
|
||||
|
@ -615,13 +739,16 @@ SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
|
|||
if (pVnode == NULL) return NULL;
|
||||
|
||||
strcpy(pVnode->db, pVgroup->dbName);
|
||||
int32_t maxTables = taosIdPoolMaxSize(pVgroup->idPool);
|
||||
//TODO: dynamic alloc tables in tsdb
|
||||
maxTables = MAX(10000, tsMaxTablePerVnode);
|
||||
|
||||
SMDVnodeCfg *pCfg = &pVnode->cfg;
|
||||
pCfg->vgId = htonl(pVgroup->vgId);
|
||||
pCfg->cfgVersion = htonl(pDb->cfgVersion);
|
||||
pCfg->cacheBlockSize = htonl(pDb->cfg.cacheBlockSize);
|
||||
pCfg->totalBlocks = htonl(pDb->cfg.totalBlocks);
|
||||
pCfg->maxTables = htonl(pDb->cfg.maxTables + 1);
|
||||
pCfg->maxTables = htonl(maxTables + 1);
|
||||
pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile);
|
||||
pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep);
|
||||
pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1);
|
||||
|
@ -799,19 +926,20 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) {
|
|||
mDebug("dnode:%s, vgId:%d, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId);
|
||||
return TSDB_CODE_MND_VGROUP_NOT_EXIST;
|
||||
}
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
|
||||
SVgObj *pVgroup = mnodeGetVgroup(pCfg->vgId);
|
||||
if (pVgroup == NULL) {
|
||||
mDebug("dnode:%s, vgId:%d, no vgroup info", taosIpStr(pCfg->dnodeId), pCfg->vgId);
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
return TSDB_CODE_MND_VGROUP_NOT_EXIST;
|
||||
}
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
|
||||
mDebug("vgId:%d, send create vnode msg to dnode %s for vnode cfg msg", pVgroup->vgId, pDnode->dnodeEp);
|
||||
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
|
||||
mnodeSendCreateVnodeMsg(pVgroup, &ipSet, NULL);
|
||||
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
mnodeDecVgroupRef(pVgroup);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -841,9 +969,10 @@ void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
|
|||
|
||||
sdbFreeIter(pIter);
|
||||
|
||||
mInfo("dnode:%d, all vgroups is dropped from sdb", pDropDnode->dnodeId);
|
||||
mInfo("dnode:%d, all vgroups:%d is dropped from sdb", pDropDnode->dnodeId, numOfVgroups);
|
||||
}
|
||||
|
||||
#if 0
|
||||
void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
|
||||
void * pIter = NULL;
|
||||
SVgObj *pVgroup = NULL;
|
||||
|
@ -865,6 +994,7 @@ void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
|
|||
|
||||
mInfo("db:%s, all vgroups is updated in sdb", pAlterDb->name);
|
||||
}
|
||||
#endif
|
||||
|
||||
void mnodeDropAllDbVgroups(SDbObj *pDropDb) {
|
||||
void * pIter = NULL;
|
||||
|
|
|
@ -206,7 +206,7 @@ typedef struct HttpThread {
|
|||
pthread_mutex_t threadMutex;
|
||||
bool stop;
|
||||
int pollFd;
|
||||
int numOfFds;
|
||||
int numOfContexts;
|
||||
int threadId;
|
||||
char label[HTTP_LABEL_SIZE];
|
||||
bool (*processData)(HttpContext *pContext);
|
||||
|
|
|
@ -44,7 +44,7 @@ static void httpDestroyContext(void *data) {
|
|||
HttpThread *pThread = pContext->pThread;
|
||||
httpRemoveContextFromEpoll(pContext);
|
||||
httpReleaseSession(pContext);
|
||||
atomic_sub_fetch_32(&pThread->numOfFds, 1);
|
||||
atomic_sub_fetch_32(&pThread->numOfContexts, 1);
|
||||
|
||||
pContext->pThread = 0;
|
||||
pContext->state = HTTP_CONTEXT_STATE_CLOSED;
|
||||
|
@ -171,38 +171,39 @@ bool httpInitContext(HttpContext *pContext) {
|
|||
|
||||
void httpCloseContextByApp(HttpContext *pContext) {
|
||||
pContext->parsed = false;
|
||||
|
||||
bool keepAlive = true;
|
||||
|
||||
if (pContext->httpVersion == HTTP_VERSION_10 && pContext->httpKeepAlive != HTTP_KEEPALIVE_ENABLE) {
|
||||
keepAlive = false;
|
||||
} else if (pContext->httpVersion != HTTP_VERSION_10 && pContext->httpKeepAlive == HTTP_KEEPALIVE_DISABLE) {
|
||||
keepAlive = false;
|
||||
} else {}
|
||||
} else {
|
||||
}
|
||||
|
||||
if (keepAlive) {
|
||||
if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_HANDLING, HTTP_CONTEXT_STATE_READY)) {
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse connect",
|
||||
pContext, pContext->fd, pContext->ipstr);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse context", pContext, pContext->fd,
|
||||
pContext->ipstr);
|
||||
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_CLOSED)) {
|
||||
httpRemoveContextFromEpoll(pContext);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect",
|
||||
pContext, pContext->fd, pContext->ipstr);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", pContext, pContext->fd,
|
||||
pContext->ipstr);
|
||||
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) {
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse connect",
|
||||
pContext, pContext->fd, pContext->ipstr);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse context", pContext, pContext->fd,
|
||||
pContext->ipstr);
|
||||
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) {
|
||||
httpRemoveContextFromEpoll(pContext);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect",
|
||||
pContext, pContext->fd, pContext->ipstr);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", pContext, pContext->fd,
|
||||
pContext->ipstr);
|
||||
} else {
|
||||
httpRemoveContextFromEpoll(pContext);
|
||||
httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect",
|
||||
pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
|
||||
httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", pContext, pContext->fd,
|
||||
pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
|
||||
}
|
||||
} else {
|
||||
httpRemoveContextFromEpoll(pContext);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close connect",
|
||||
pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close context", pContext, pContext->fd,
|
||||
pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
|
||||
}
|
||||
|
||||
httpReleaseContext(pContext);
|
||||
|
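The keep-alive branches above can be read as one predicate: HTTP/1.0 closes unless keep-alive was requested, anything newer keeps the connection unless it was explicitly disabled. A sketch of that predicate (hypothetical helper with placeholder constant values, not the module's API):

#include <stdbool.h>

/* Placeholder values standing in for HTTP_VERSION_10 / HTTP_KEEPALIVE_*. */
enum { SK_HTTP_VER_10 = 0, SK_KEEPALIVE_ENABLE = 1, SK_KEEPALIVE_DISABLE = 2 };

static bool shouldKeepAlive(int httpVersion, int keepAliveFlag) {
  if (httpVersion == SK_HTTP_VER_10 && keepAliveFlag != SK_KEEPALIVE_ENABLE) return false;  /* 1.0 defaults to close */
  if (httpVersion != SK_HTTP_VER_10 && keepAliveFlag == SK_KEEPALIVE_DISABLE) return false; /* 1.1+ defaults to keep */
  return true;
}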
@ -214,7 +215,7 @@ void httpCloseContextByServer(HttpContext *pContext) {
|
|||
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_DROPPING)) {
|
||||
httpDebug("context:%p, fd:%d, ip:%s, epoll already finished, wait app finished", pContext, pContext->fd, pContext->ipstr);
|
||||
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_CLOSED)) {
|
||||
httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close context", pContext, pContext->fd, pContext->ipstr);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close connect", pContext, pContext->fd, pContext->ipstr);
|
||||
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) {
|
||||
httpDebug("context:%p, fd:%d, ip:%s, epoll finished, will be closed soon", pContext, pContext->fd, pContext->ipstr);
|
||||
} else {
|
||||
|
|
|
@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
|
|||
return true;
|
||||
}
|
||||
|
||||
httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
|
||||
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
|
||||
pContext->parser.buffer);
|
||||
httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfContexts:%d, read size:%d, raw data:\n%s", pContext,
|
||||
pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfContexts,
|
||||
pContext->parser.bufsize, pContext->parser.buffer);
|
||||
|
||||
if (!httpGetHttpMethod(pContext)) {
|
||||
return false;
|
||||
|
|
|
@ -293,7 +293,7 @@ static void *httpAcceptHttpConnection(void *arg) {
|
|||
|
||||
totalFds = 1;
|
||||
for (int i = 0; i < pServer->numOfThreads; ++i) {
|
||||
totalFds += pServer->pThreads[i].numOfFds;
|
||||
totalFds += pServer->pThreads[i].numOfContexts;
|
||||
}
|
||||
|
||||
if (totalFds > tsHttpCacheSessions * 100) {
|
||||
|
@ -332,9 +332,9 @@ static void *httpAcceptHttpConnection(void *arg) {
|
|||
}
|
||||
|
||||
// notify the data process, add into the FdObj list
|
||||
atomic_add_fetch_32(&pThread->numOfFds, 1);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfFds:%d totalFds:%d, accept a new connection", pContext, connFd,
|
||||
pContext->ipstr, pThread->label, pThread->numOfFds, totalFds);
|
||||
atomic_add_fetch_32(&pThread->numOfContexts, 1);
|
||||
httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfContexts:%d totalFds:%d, accept a new connection", pContext,
|
||||
connFd, pContext->ipstr, pThread->label, pThread->numOfContexts, totalFds);
|
||||
|
||||
// pick up next thread for next connection
|
||||
threadId++;
|
||||
|
|
|
@ -233,10 +233,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num
|
|||
}
|
||||
}
|
||||
|
||||
void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) {
|
||||
void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
|
||||
HttpContext *pContext = (HttpContext *)param;
|
||||
if (pContext == NULL) return;
|
||||
|
||||
int32_t code = taos_errno(result);
|
||||
HttpEncodeMethod *encode = pContext->encodeMethod;
|
||||
|
||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
|
@ -260,8 +261,8 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) {
|
|||
return;
|
||||
}
|
||||
|
||||
int num_fields = taos_field_count(result);
|
||||
if (num_fields == 0) {
|
||||
bool isUpdate = tscIsUpdateQuery(result);
|
||||
if (isUpdate) {
|
||||
// not select or show commands
|
||||
int affectRows = taos_affected_rows(result);
|
||||
|
||||
|
|
|
@ -4462,6 +4462,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
|
|||
}
|
||||
|
||||
pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
|
||||
taosArrayDestroy(g1);
|
||||
taosArrayDestroy(tx);
|
||||
|
||||
SArray* s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle);
|
||||
assert(taosArrayGetSize(s) >= 1);
|
||||
|
@ -5811,7 +5813,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
|
|||
qDebug("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->window.skey,
|
||||
pQuery->window.ekey, pQuery->order.order);
|
||||
setQueryStatus(pQuery, QUERY_COMPLETED);
|
||||
|
||||
pQInfo->tableqinfoGroupInfo.numOfTables = 0;
|
||||
sem_post(&pQInfo->dataReady);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -5839,6 +5841,18 @@ _error:
|
|||
return code;
|
||||
}
|
||||
|
||||
static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
|
||||
if (pFilter == NULL) {
|
||||
return;
|
||||
}
|
||||
for (int32_t i = 0; i < numOfFilters; i++) {
|
||||
if (pFilter[i].filterstr) {
|
||||
free((void*)(pFilter[i].pz));
|
||||
}
|
||||
}
|
||||
free(pFilter);
|
||||
}
|
||||
|
||||
static void freeQInfo(SQInfo *pQInfo) {
|
||||
if (!isValidQInfo(pQInfo)) {
|
||||
return;
|
||||
|
@ -5907,7 +5921,15 @@ static void freeQInfo(SQInfo *pQInfo) {
|
|||
|
||||
tfree(pQuery->tagColList);
|
||||
tfree(pQuery->pFilterInfo);
|
||||
tfree(pQuery->colList);
|
||||
|
||||
if (pQuery->colList != NULL) {
|
||||
for (int32_t i = 0; i < pQuery->numOfCols; i++) {
|
||||
SColumnInfo* column = pQuery->colList + i;
|
||||
freeColumnFilterInfo(column->filters, column->numOfFilters);
|
||||
}
|
||||
tfree(pQuery->colList);
|
||||
}
|
||||
|
||||
tfree(pQuery->sdata);
|
||||
|
||||
tfree(pQuery);
|
||||
|
@ -6103,6 +6125,11 @@ _over:
|
|||
free(pExprMsg);
|
||||
taosArrayDestroy(pTableIdList);
|
||||
|
||||
for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) {
|
||||
SColumnInfo* column = pQueryMsg->colList + i;
|
||||
freeColumnFilterInfo(column->filters, column->numOfFilters);
|
||||
}
|
||||
|
||||
//pQInfo already freed in initQInfo, but *pQInfo may not pointer to null;
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
*pQInfo = NULL;
|
||||
|
|
|
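The new freeColumnFilterInfo and the per-column loops in freeQInfo and the _over path all enforce the same ownership rule: release each column's filter array (and any string filter it owns) before releasing the column list itself. A small sketch of that cleanup order, under assumed field names mirroring SColumnInfo/SColumnFilterInfo:

#include <stdlib.h>
#include <stdint.h>

/* Sketch only, assumed minimal shapes of the structs involved. */
typedef struct { uint8_t filterstr; void *pz; } FilterSketch;
typedef struct { FilterSketch *filters; int32_t numOfFilters; } ColumnSketch;

static void freeColumns(ColumnSketch *cols, int32_t numOfCols) {
  if (cols == NULL) return;
  for (int32_t i = 0; i < numOfCols; i++) {
    for (int32_t j = 0; j < cols[i].numOfFilters; j++) {
      if (cols[i].filters[j].filterstr) free(cols[i].filters[j].pz);  /* string filters own a buffer */
    }
    free(cols[i].filters);                                            /* then the filter array       */
  }
  free(cols);                                                         /* finally the column list     */
}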
@ -1366,6 +1366,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
|
|||
}
|
||||
|
||||
int32_t compLen = LZ4_compress_default(pCont, buf, contLen, contLen + overhead);
|
||||
tDebug("compress rpc msg, before:%d, after:%d, overhead:%d", contLen, compLen, overhead);
|
||||
|
||||
/*
|
||||
* only the compressed size is less than the value of contLen - overhead, the compression is applied
|
||||
|
@ -1378,7 +1379,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
|
|||
memcpy(pCont + overhead, buf, compLen);
|
||||
|
||||
pHead->comp = 1;
|
||||
//tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen);
|
||||
tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen);
|
||||
finalLen = compLen + overhead;
|
||||
} else {
|
||||
finalLen = contLen;
|
||||
|
|
|
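The compression path only keeps the compressed payload when it is smaller than the original minus the header overhead; otherwise the message is sent uncompressed. A sketch of that rule (illustrative only; the real function also sets pHead->comp and uses the rpc header layout):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <lz4.h>

static int32_t compressIfSmaller(char *pCont, int32_t contLen, int32_t overhead) {
  char *buf = malloc(contLen + overhead);      /* scratch buffer for the compressed bytes   */
  if (buf == NULL) return contLen;             /* on allocation failure keep the message    */

  int32_t compLen  = LZ4_compress_default(pCont, buf, contLen, contLen + overhead);
  int32_t finalLen = contLen;
  if (compLen > 0 && compLen < contLen - overhead) {
    memcpy(pCont + overhead, buf, compLen);    /* leave room for the compression header     */
    finalLen = compLen + overhead;
  }
  free(buf);
  return finalLen;                             /* unchanged length means "not compressed"   */
}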
@ -119,7 +119,8 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
|
|||
|
||||
int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
||||
if (pMemTable == NULL) return 0;
|
||||
T_REF_INC(pMemTable);
|
||||
int ref = T_REF_INC(pMemTable);
|
||||
tsdbDebug("vgId:%d ref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -127,7 +128,9 @@ int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
|||
int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
||||
if (pMemTable == NULL) return 0;
|
||||
|
||||
if (T_REF_DEC(pMemTable) == 0) {
|
||||
int ref = T_REF_DEC(pMemTable);
|
||||
tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
|
||||
if (ref == 0) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbBufPool *pBufPool = pRepo->pPool;
|
||||
|
||||
|
@ -167,6 +170,7 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
|
|||
tsdbRefMemTable(pRepo, *pIMem);
|
||||
|
||||
if (tsdbUnlockRepo(pRepo) < 0) return -1;
|
||||
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
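The memtable change switches from testing the decrement inside the if-condition to capturing the post-decrement count once, so it can be logged before any action is taken on it. A generic sketch of that pattern using C11 atomics in place of the T_REF_* macros (illustrative only, not the project's API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  atomic_int ref;
  /* ... payload ... */
} RefCounted;

static void refRelease(RefCounted *p) {
  int ref = atomic_fetch_sub(&p->ref, 1) - 1;   /* value after the decrement          */
  printf("unref %p ref %d\n", (void *)p, ref);  /* log it before acting on it          */
  if (ref == 0) {
    free(p);                                    /* last reference gone, reclaim object */
  }
}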
@ -739,7 +739,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
|
|||
pCompBlock->keyFirst = dataColsKeyFirst(pDataCols);
|
||||
pCompBlock->keyLast = dataColsKeyAt(pDataCols, rowsToWrite - 1);
|
||||
|
||||
tsdbDebug("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64
|
||||
tsdbTrace("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64
|
||||
" numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64,
|
||||
REPO_ID(helperRepo(pHelper)), pHelper->tableInfo.tid, pFile->fname, (int64_t)(pCompBlock->offset),
|
||||
(int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst,
|
||||
|
@ -940,7 +940,7 @@ static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int
|
|||
ASSERT(pHelper->pCompInfo->blocks[0].keyLast < pHelper->pCompInfo->blocks[1].keyFirst);
|
||||
}
|
||||
|
||||
tsdbDebug("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid,
|
||||
tsdbTrace("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid,
|
||||
blkIdx);
|
||||
|
||||
return 0;
|
||||
|
|
|
@@ -18,6 +18,7 @@

#ifdef TAOS_RANDOM_FILE_FAIL

void taosSetRandomFileFailFactor(int factor);
ssize_t taos_tread(int fd, void *buf, size_t count);
ssize_t taos_twrite(int fd, void *buf, size_t count);
off_t taos_lseek(int fd, off_t offset, int whence);
@@ -26,40 +26,51 @@

#include "os.h"

#define RANDOM_FILE_FAIL_FACTOR 5
#ifdef TAOS_RANDOM_FILE_FAIL

static int random_file_fail_factor = 20;

void taosSetRandomFileFailFactor(int factor)
{
  random_file_fail_factor = factor;
}
#endif

ssize_t taos_tread(int fd, void *buf, size_t count)
{
#ifdef TAOS_RANDOM_FILE_FAIL
  if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
    errno = EIO;
    return -1;
  if (random_file_fail_factor > 0) {
    if (rand() % random_file_fail_factor == 0) {
      errno = EIO;
      return -1;
    }
  }
#endif

  return tread(fd, buf, count);
}

ssize_t taos_twrite(int fd, void *buf, size_t count)
{
#ifdef TAOS_RANDOM_FILE_FAIL
  if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
    errno = EIO;
    return -1;
  if (random_file_fail_factor > 0) {
    if (rand() % random_file_fail_factor == 0) {
      errno = EIO;
      return -1;
    }
  }
#endif

  return twrite(fd, buf, count);
}

off_t taos_lseek(int fd, off_t offset, int whence)
{
#ifdef TAOS_RANDOM_FILE_FAIL
  if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
    errno = EIO;
    return -1;
  if (random_file_fail_factor > 0) {
    if (rand() % random_file_fail_factor == 0) {
      errno = EIO;
      return -1;
    }
  }
#endif

  return lseek(fd, offset, whence);
}

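The wrappers above gate every read, write, and seek behind a configurable failure factor: with the default of 20, roughly one call in twenty fails with EIO once TAOS_RANDOM_FILE_FAIL is compiled in and the factor is non-zero. A minimal Python sketch of the same idea, for illustration only (the names below are hypothetical and not part of the TDengine sources):

import errno
import random

random_file_fail_factor = 20   # 0 disables injection; N means roughly 1 failure per N calls

def set_random_file_fail_factor(factor):
    global random_file_fail_factor
    random_file_fail_factor = factor

def fault_injected_read(f, count):
    # Mirror the taos_tread idea: occasionally pretend the read failed with EIO.
    if random_file_fail_factor > 0 and random.randrange(random_file_fail_factor) == 0:
        raise OSError(errno.EIO, "injected I/O error")
    return f.read(count)
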
@@ -123,7 +123,7 @@ void taosIdPoolMarkStatus(void *handle, int id) {
int taosUpdateIdPool(id_pool_t *handle, int maxId) {
  id_pool_t *pIdPool = (id_pool_t*)handle;
  if (maxId <= pIdPool->maxId) {
    return -1;
    return 0;
  }

  bool *idList = calloc(maxId, sizeof(bool));

@@ -260,7 +260,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
  }

  taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
  uDebug("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname);
  uTrace("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname);

  return 0;
}

@@ -28,6 +28,7 @@
#include "taoserror.h"
#include "twal.h"
#include "tqueue.h"
#include "tfile.h"

#define walPrefix "wal"

@@ -180,7 +181,7 @@ int walWrite(void *handle, SWalHead *pHead) {
  taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead));
  int contLen = pHead->len + sizeof(SWalHead);

  if(write(pWal->fd, pHead, contLen) != contLen) {
  if(twrite(pWal->fd, pHead, contLen) != contLen) {
    wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno));
    terrno = TAOS_SYSTEM_ERROR(errno);
  } else {

@@ -325,7 +326,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
  wDebug("wal:%s, start to restore", name);

  while (1) {
    int ret = read(fd, pHead, sizeof(SWalHead));
    int ret = tread(fd, pHead, sizeof(SWalHead));
    if ( ret == 0) break;

    if (ret != sizeof(SWalHead)) {

@@ -340,7 +341,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
      break;
    }

    ret = read(fd, pHead->cont, pHead->len);
    ret = tread(fd, pHead->cont, pHead->len);
    if ( ret != pHead->len) {
      wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret);
      terrno = TAOS_SYSTEM_ERROR(errno);

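The restore loop above reads a fixed-size head, treats a zero-length read as a clean end of file, and stops when either the head or the body comes back short. A minimal Python sketch of that pattern, assuming a hypothetical little-endian head of (checksum, version, body length) rather than the real SWalHead layout:

import struct

HEAD_FMT = "<IIQ"                      # hypothetical head layout, not the real SWalHead
HEAD_SIZE = struct.calcsize(HEAD_FMT)

def restore_wal(path, apply_record):
    with open(path, "rb") as f:
        while True:
            head = f.read(HEAD_SIZE)
            if len(head) == 0:
                break                  # clean end of the log
            if len(head) != HEAD_SIZE:
                break                  # truncated head, stop restoring
            _cksum, _ver, body_len = struct.unpack(HEAD_FMT, head)
            body = f.read(body_len)
            if len(body) != body_len:
                break                  # truncated body, skip the tail
            apply_record(body)
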
File diff suppressed because it is too large
@@ -31,11 +31,22 @@ then
  exit -1
fi

CURR_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
  TAOS_DIR=$CURR_DIR/../../..
else
  TAOS_DIR=$CURR_DIR/../..
fi
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`

LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib

# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3

# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR

# Now we are all set, and let's see if we can find a crash. Note we pass all params
python3 ./crash_gen.py $@

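The script now locates the taosd binary under either the community or the internal build tree and derives the library directory from it before launching the generator. A rough Python equivalent of that lookup, included only as an illustration (the directory layout is an assumption based on the script above):

import os

def find_taosd_and_libdir(taos_dir):
    # Walk the build tree for a taosd binary under a .../bin directory and
    # assume the shared libraries live in the sibling lib directory.
    for root, _dirs, files in os.walk(taos_dir):
        if "taosd" in files and os.path.basename(root) == "bin":
            return os.path.join(root, "taosd"), os.path.join(os.path.dirname(root), "lib")
    return None, None
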
@@ -121,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py
python3 ./test.py -f import_merge/importTPORestart.py
python3 ./test.py -f import_merge/importTRestart.py
python3 ./test.py -f import_merge/importInsertThenImport.py

python3 ./test.py -f import_merge/importCSV.py
# user
python3 ./test.py -f user/user_create.py
python3 ./test.py -f user/pass_len.py

@@ -145,11 +145,18 @@ python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/select_last_crash.py

#stream
python3 ./test.py -f stream/metric_1.py
python3 ./test.py -f stream/new.py
python3 ./test.py -f stream/stream1.py
python3 ./test.py -f stream/stream2.py
python3 ./test.py -f stream/parser.py

#alter table
python3 ./test.py -f alter/alter_table_crash.py

# client
python3 ./test.py -f client/client.py

# Misc
python3 testCompress.py
python3 testNoCompress.py

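Every case referenced in this list follows the same shape imposed by the util helpers these tests import (util.log, util.cases, util.sql): an init that binds the connection, a run that issues SQL through tdSql, and a stop that closes the session, plus the tdCases registration at module level. A minimal skeleton, assuming the tests/pytest directory is on PYTHONPATH (the table name and values below are placeholders):

# -*- coding: utf-8 -*-
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdSql.execute("create table tb (ts timestamp, c1 int)")
        tdSql.execute("insert into tb values (now, 1)")
        tdSql.query("select count(*) from tb")
        tdSql.checkData(0, 0, 1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
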
@ -0,0 +1,94 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import csv
|
||||
import random
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.csvfile = "/tmp/file.csv"
|
||||
self.rows = 10000
|
||||
self.ntables = 1
|
||||
self.startTime = 1520000010000
|
||||
def genRandomStr(self, maxLen):
|
||||
H = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
|
||||
salt = ''
|
||||
if maxLen <= 1:
|
||||
maxLen = 2
|
||||
l = random.randint(1,maxLen)
|
||||
for i in range(l):
|
||||
salt += random.choice(H)
|
||||
return salt
|
||||
def createCSVFile(self):
|
||||
f = open(self.csvfile,'w',encoding='utf-8')
|
||||
csv_writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
|
||||
for i in range(self.rows):
|
||||
csv_writer.writerow([self.startTime + i,
|
||||
self.genRandomStr(5),
|
||||
self.genRandomStr(6),
|
||||
self.genRandomStr(7),
|
||||
self.genRandomStr(8),
|
||||
self.genRandomStr(9),
|
||||
self.genRandomStr(10),
|
||||
self.genRandomStr(11),
|
||||
self.genRandomStr(12),
|
||||
self.genRandomStr(13),
|
||||
self.genRandomStr(14)])
|
||||
f.close()
|
||||
def destroyCSVFile(self):
|
||||
os.remove(self.csvfile)
|
||||
def run(self):
|
||||
self.createCSVFile()
|
||||
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.deploy(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
tdSql.execute('reset query cache')
|
||||
tdSql.execute('drop database if exists db')
|
||||
tdSql.execute('create database db')
|
||||
tdSql.execute('use db')
|
||||
tdSql.execute('''create table tbx (ts TIMESTAMP,
|
||||
collect_area NCHAR(5),
|
||||
device_id BINARY(6),
|
||||
imsi BINARY(7),
|
||||
imei BINARY(8),
|
||||
mdn BINARY(9),
|
||||
net_type BINARY(10),
|
||||
mno NCHAR(11),
|
||||
province NCHAR(12),
|
||||
city NCHAR(13),
|
||||
alarm BINARY(14))''')
|
||||
|
||||
tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
|
||||
tdSql.query('select * from tbx')
|
||||
tdSql.checkRows(self.rows)
|
||||
|
||||
def stop(self):
|
||||
self.destroyCSVFile()
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
tdDnodes.stop(1)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@@ -121,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py
python3 ./test.py -f import_merge/importTPORestart.py
python3 ./test.py -f import_merge/importTRestart.py
python3 ./test.py -f import_merge/importInsertThenImport.py

python3 ./test.py -f import_merge/importCSV.py
# user
python3 ./test.py -f user/user_create.py
python3 ./test.py -f user/pass_len.py

@@ -150,3 +150,7 @@ python3 ./test.py -f alter/alter_table_crash.py

# client
python3 ./test.py -f client/client.py

# Misc
python3 testCompress.py
python3 testNoCompress.py

@ -0,0 +1,104 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import time
|
||||
import taos
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def createFuncStream(self, expr, suffix, value):
|
||||
tbname = "strm_" + suffix
|
||||
tdLog.info("create stream table %s" % tbname)
|
||||
tdSql.query("select %s from stb interval(1d)" % expr)
|
||||
tdSql.checkData(0, 1, value)
|
||||
tdSql.execute("create table %s as select %s from stb interval(1d)" % (tbname, expr))
|
||||
|
||||
def checkStreamData(self, suffix, value):
|
||||
sql = "select * from strm_" + suffix
|
||||
tdSql.waitedQuery(sql, 1, 120)
|
||||
tdSql.checkData(0, 1, value)
|
||||
|
||||
def run(self):
|
||||
tbNum = 10
|
||||
rowNum = 20
|
||||
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.info("===== preparing data =====")
|
||||
tdSql.execute(
|
||||
"create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
|
||||
for i in range(tbNum):
|
||||
tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
|
||||
for j in range(rowNum):
|
||||
tdSql.execute(
|
||||
"insert into tb%d values (now - %dm, %d, %d)" %
|
||||
(i, 1440 - j, j, j))
|
||||
time.sleep(0.1)
|
||||
|
||||
self.createFuncStream("count(*)", "c1", 200)
|
||||
self.createFuncStream("count(tbcol)", "c2", 200)
|
||||
self.createFuncStream("count(tbcol2)", "c3", 200)
|
||||
self.createFuncStream("avg(tbcol)", "av", 9.5)
|
||||
self.createFuncStream("sum(tbcol)", "su", 1900)
|
||||
self.createFuncStream("min(tbcol)", "mi", 0)
|
||||
self.createFuncStream("max(tbcol)", "ma", 19)
|
||||
self.createFuncStream("first(tbcol)", "fi", 0)
|
||||
self.createFuncStream("last(tbcol)", "la", 19)
|
||||
#tdSql.query("select stddev(tbcol) from stb interval(1d)")
|
||||
#tdSql.query("select leastsquares(tbcol, 1, 1) from stb interval(1d)")
|
||||
tdSql.query("select top(tbcol, 1) from stb interval(1d)")
|
||||
tdSql.query("select bottom(tbcol, 1) from stb interval(1d)")
|
||||
#tdSql.query("select percentile(tbcol, 1) from stb interval(1d)")
|
||||
#tdSql.query("select diff(tbcol) from stb interval(1d)")
|
||||
|
||||
tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d)")
|
||||
tdSql.checkData(0, 1, 200)
|
||||
#tdSql.execute("create table strm_wh as select count(tbcol) from stb where ts < now + 4m interval(1d)")
|
||||
|
||||
self.createFuncStream("count(tbcol)", "as", 200)
|
||||
|
||||
tdSql.query("select count(tbcol) from stb interval(1d) group by tgcol")
|
||||
tdSql.checkData(0, 1, 20)
|
||||
|
||||
tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d) group by tgcol")
|
||||
tdSql.checkData(0, 1, 20)
|
||||
|
||||
self.checkStreamData("c1", 200)
|
||||
self.checkStreamData("c2", 200)
|
||||
self.checkStreamData("c3", 200)
|
||||
self.checkStreamData("av", 9.5)
|
||||
self.checkStreamData("su", 1900)
|
||||
self.checkStreamData("mi", 0)
|
||||
self.checkStreamData("ma", 19)
|
||||
self.checkStreamData("fi", 0)
|
||||
self.checkStreamData("la", 19)
|
||||
#self.checkStreamData("wh", 200)
|
||||
self.checkStreamData("as", 200)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import time
|
||||
import taos
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
rowNum = 200
|
||||
totalNum = 200
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.info("=============== step1")
|
||||
tdSql.execute("create table mt(ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)")
|
||||
for i in range(5):
|
||||
tdSql.execute("create table tb%d using mt tags(%d)" % (i, i))
|
||||
for j in range(rowNum):
|
||||
tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j))
|
||||
time.sleep(0.1)
|
||||
|
||||
tdLog.info("=============== step2")
|
||||
tdSql.query("select count(*), count(tbcol), count(tbcol2) from mt interval(10s)")
|
||||
tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)")
|
||||
|
||||
tdLog.info("=============== step3")
|
||||
tdSql.waitedQuery("select * from st", 1, 120)
|
||||
v = tdSql.getData(0, 3)
|
||||
if v >= 51:
|
||||
tdLog.exit("value is %d, which is larger than 51" % v)
|
||||
|
||||
tdLog.info("=============== step4")
|
||||
for i in range(5, 10):
|
||||
tdSql.execute("create table tb%d using mt tags(%d)" % (i, i))
|
||||
for j in range(rowNum):
|
||||
tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j))
|
||||
|
||||
tdLog.info("=============== step5")
|
||||
tdLog.sleep(30)
|
||||
tdSql.waitedQuery("select * from st order by ts desc", 1, 120)
|
||||
v = tdSql.getData(0, 3)
|
||||
if v <= 51:
|
||||
tdLog.exit("value is %d, which is smaller than 51" % v)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
||||
|
|
@ -0,0 +1,182 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import time
|
||||
import taos
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
'''
|
||||
def bug2222(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute("create table superreal(ts timestamp, addr binary(5), val float) tags (deviceNo binary(20))")
|
||||
tdSql.execute("create table real_001 using superreal tags('001')")
|
||||
tdSql.execute("create table tj_001 as select sum(val) from real_001 interval(1m)")
|
||||
|
||||
t = datetime.datetime.now()
|
||||
for i in range(60):
|
||||
ts = t.strftime("%Y-%m-%d %H:%M")
|
||||
t += datetime.timedelta(minutes=1)
|
||||
sql = "insert into real_001 values('%s:0%d', '1', %d)" % (ts, 0, i)
|
||||
for j in range(4):
|
||||
sql += ",('%s:0%d', '%d', %d)" % (ts, j + 1, j + 1, i)
|
||||
tdSql.execute(sql)
|
||||
time.sleep(60 + random.random() * 60 - 30)
|
||||
'''
|
||||
|
||||
def tbase300(self):
|
||||
tdLog.debug("begin tbase300")
|
||||
|
||||
tdSql.prepare()
|
||||
tdSql.execute("create table mt(ts timestamp, c1 int, c2 int) tags(t1 int)")
|
||||
tdSql.execute("create table tb1 using mt tags(1)");
|
||||
tdSql.execute("create table tb2 using mt tags(2)");
|
||||
tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s)")
|
||||
#tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2), first(c1) from mt interval(4s) sliding(2s)")
|
||||
tdLog.sleep(10)
|
||||
tdSql.execute("insert into tb2 values(now, 1, 1)");
|
||||
tdSql.execute("insert into tb1 values(now, 1, 1)");
|
||||
tdLog.sleep(4)
|
||||
tdSql.query("select * from mt")
|
||||
tdSql.query("select * from strm")
|
||||
tdSql.execute("drop table tb1")
|
||||
|
||||
tdSql.waitedQuery("select * from strm", 1, 100)
|
||||
if tdSql.queryRows < 1 or tdSql.queryRows > 2:
|
||||
tdLog.exit("rows should be 1 or 2")
|
||||
|
||||
tdSql.execute("drop table tb2")
|
||||
tdSql.execute("drop table mt")
|
||||
tdSql.execute("drop table strm")
|
||||
|
||||
def tbase304(self):
|
||||
tdLog.debug("begin tbase304")
|
||||
# we cannot reset the query cache on the server side; as a workaround,
|
||||
# set super table name to mt304, need to change back to mt later
|
||||
tdSql.execute("create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int)")
|
||||
tdSql.execute("create table tb1 using mt304 tags(1, 1)")
|
||||
tdSql.execute("create table tb2 using mt304 tags(1, -1)")
|
||||
time.sleep(0.1)
|
||||
tdSql.execute("create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s)")
|
||||
tdSql.execute("insert into tb1 values (now,1)")
|
||||
tdSql.execute("insert into tb2 values (now,2)")
|
||||
|
||||
tdSql.waitedQuery("select * from strm", 1, 100)
|
||||
if tdSql.queryRows < 1 or tdSql.queryRows > 2:
|
||||
tdLog.exit("rows should be 1 or 2")
|
||||
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(0, 2, 1.000000000)
|
||||
tdSql.execute("alter table mt304 drop tag t2")
|
||||
tdSql.execute("insert into tb2 values (now,2)")
|
||||
tdSql.execute("insert into tb1 values (now,1)")
|
||||
tdSql.query("select * from strm")
|
||||
tdSql.execute("alter table mt304 add tag t2 int")
|
||||
tdLog.sleep(1)
|
||||
tdSql.query("select * from strm")
|
||||
|
||||
def wildcardFilterOnTags(self):
|
||||
tdLog.debug("begin wildcardFilterOnTag")
|
||||
tdSql.prepare()
|
||||
tdSql.execute("create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10))")
|
||||
tdSql.execute("create table tb1 using stb tags('a1')")
|
||||
tdSql.execute("create table tb2 using stb tags('b2')")
|
||||
tdSql.execute("create table tb3 using stb tags('a3')")
|
||||
tdSql.execute("create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s)")
|
||||
tdSql.query("describe strm")
|
||||
tdSql.checkRows(4)
|
||||
|
||||
tdLog.sleep(1)
|
||||
tdSql.execute("insert into tb1 values (now, 0, 'tb1')")
|
||||
tdLog.sleep(4)
|
||||
tdSql.execute("insert into tb2 values (now, 2, 'tb2')")
|
||||
tdLog.sleep(4)
|
||||
tdSql.execute("insert into tb3 values (now, 0, 'tb3')")
|
||||
|
||||
tdSql.waitedQuery("select * from strm", 4, 60)
|
||||
tdSql.checkRows(4)
|
||||
tdSql.checkData(0, 2, 0.000000000)
|
||||
if tdSql.getData(0, 3) == 'tb2':
|
||||
tdLog.exit("unexpected value of data03")
|
||||
if tdSql.getData(1, 3) == 'tb2':
|
||||
tdLog.exit("unexpected value of data13")
|
||||
if tdSql.getData(2, 3) == 'tb2':
|
||||
tdLog.exit("unexpected value of data23")
|
||||
if tdSql.getData(3, 3) == 'tb2':
|
||||
tdLog.exit("unexpected value of data33")
|
||||
|
||||
tdLog.info("add table tb4 to see if stream still works correctly")
|
||||
# The vnode client needs to refresh its metadata cache to allow strm to calculate tb4's data.
|
||||
# But the current refreshing frequency is every 10 min
|
||||
# commented out the case below to save running time
|
||||
tdSql.execute("create table tb4 using stb tags('a4')")
|
||||
tdSql.execute("insert into tb4 values(now, 4, 'tb4')")
|
||||
tdSql.waitedQuery("select * from strm order by ts desc", 6, 60)
|
||||
tdSql.checkRows(6)
|
||||
tdSql.checkData(0, 2, 4)
|
||||
tdSql.checkData(0, 3, "tb4")
|
||||
|
||||
tdLog.info("change tag values to see if stream still works correctly")
|
||||
tdSql.execute("alter table tb4 set tag t1='b4'")
|
||||
tdLog.sleep(3)
|
||||
tdSql.execute("insert into tb1 values (now, 1, 'tb1_a1')")
|
||||
tdLog.sleep(4)
|
||||
tdSql.execute("insert into tb4 values (now, -4, 'tb4_b4')")
|
||||
tdSql.waitedQuery("select * from strm order by ts desc", 8, 100)
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 2, 1)
|
||||
tdSql.checkData(0, 3, "tb1_a1")
|
||||
|
||||
def datatypes(self):
|
||||
tdLog.debug("begin data types")
|
||||
tdSql.prepare()
|
||||
tdSql.execute("create table stb3 (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15))")
|
||||
tdSql.execute("create table tb0 using stb3 tags(0, 'tb0')")
|
||||
tdSql.execute("create table tb1 using stb3 tags(1, 'tb1')")
|
||||
tdSql.execute("create table tb2 using stb3 tags(2, 'tb2')")
|
||||
tdSql.execute("create table tb3 using stb3 tags(3, 'tb3')")
|
||||
tdSql.execute("create table tb4 using stb3 tags(4, 'tb4')")
|
||||
|
||||
tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb3 where ts < now + 30s interval(4s) sliding(2s)")
|
||||
#tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5) from stb where ts < now + 30s interval(4s) sliding(2s)")
|
||||
tdLog.sleep(1)
|
||||
tdSql.execute("insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) ")
|
||||
|
||||
tdSql.waitedQuery("select * from strm0 order by ts desc", 2, 120)
|
||||
tdSql.checkRows(2)
|
||||
|
||||
tdSql.execute("insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true) ")
|
||||
tdSql.waitedQuery("select * from strm0 order by ts desc", 4, 120)
|
||||
tdSql.checkRows(4)
|
||||
|
||||
def run(self):
|
||||
self.tbase300()
|
||||
self.tbase304()
|
||||
self.wildcardFilterOnTags()
|
||||
self.datatypes()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -55,10 +55,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(tbNum + 1)
|
||||
|
||||
tdLog.info("===== step3 =====")
|
||||
tdLog.info("sleeping 120 seconds")
|
||||
time.sleep(120)
|
||||
tdSql.query("select * from s0")
|
||||
|
||||
tdSql.waitedQuery("select * from s0", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, rowNum)
|
||||
tdSql.checkData(0, 2, rowNum)
|
||||
|
@ -82,10 +79,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(tbNum + 1)
|
||||
|
||||
tdLog.info("===== step7 =====")
|
||||
tdLog.info("sleeping 120 seconds")
|
||||
time.sleep(120)
|
||||
|
||||
tdSql.query("select * from s0")
|
||||
tdSql.waitedQuery("select * from s0", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, rowNum)
|
||||
tdSql.checkData(0, 2, rowNum)
|
||||
|
@ -108,10 +102,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(tbNum + 2)
|
||||
|
||||
tdLog.info("===== step9 =====")
|
||||
tdLog.info("sleeping 120 seconds")
|
||||
time.sleep(120)
|
||||
|
||||
tdSql.query("select * from s1")
|
||||
tdSql.waitedQuery("select * from s1", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, rowNum * tbNum)
|
||||
tdSql.checkData(0, 2, rowNum * tbNum)
|
||||
|
@ -134,9 +125,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(tbNum + 2)
|
||||
|
||||
tdLog.info("===== step13 =====")
|
||||
tdLog.info("sleeping 120 seconds")
|
||||
time.sleep(120)
|
||||
tdSql.query("select * from s1")
|
||||
tdSql.waitedQuery("select * from s1", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, rowNum * tbNum)
|
||||
tdSql.checkData(0, 2, rowNum * tbNum)
|
||||
|
|
|
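The fixed two-minute sleeps removed in the hunks above are replaced by tdSql.waitedQuery, which polls the statement until it returns at least the expected number of rows or the timeout expires (its implementation appears later in this diff). A short usage sketch, assuming the same test framework:

# Poll for up to 120 seconds until "select * from s0" yields at least 1 row;
# waitedQuery returns (rows, seconds_waited) and re-raises query errors.
rows, waited = tdSql.waitedQuery("select * from s0", 1, 120)
tdLog.info("stream table s0 produced %d rows after %d seconds" % (rows, waited))
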
@ -53,8 +53,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(tbNum + 1)
|
||||
|
||||
tdLog.info("===== step3 =====")
|
||||
time.sleep(120)
|
||||
tdSql.query("select * from s0")
|
||||
tdSql.waitedQuery("select * from s0", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, rowNum)
|
||||
except Exception as e:
|
||||
|
@ -81,8 +80,7 @@ class TDTestCase:
|
|||
tdLog.info(repr(e))
|
||||
|
||||
tdLog.info("===== step7 =====")
|
||||
time.sleep(120)
|
||||
tdSql.query("select * from s0")
|
||||
tdSql.waitedQuery("select * from s0", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, rowNum)
|
||||
tdSql.checkData(0, 2, rowNum)
|
||||
|
@ -107,8 +105,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(tbNum + 2)
|
||||
|
||||
tdLog.info("===== step9 =====")
|
||||
time.sleep(120)
|
||||
tdSql.query("select * from s1")
|
||||
tdSql.waitedQuery("select * from s1", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, totalNum)
|
||||
tdSql.checkData(0, 2, totalNum)
|
||||
|
@ -137,8 +134,7 @@ class TDTestCase:
|
|||
tdLog.info(repr(e))
|
||||
|
||||
tdLog.info("===== step13 =====")
|
||||
time.sleep(120)
|
||||
tdSql.query("select * from s1")
|
||||
tdSql.waitedQuery("select * from s1", 1, 120)
|
||||
try:
|
||||
tdSql.checkData(0, 1, totalNum)
|
||||
#tdSql.checkData(0, 2, None)
|
||||
|
|
|
@@ -37,17 +37,8 @@ class TDTestCase:
        except Exception as e:
            tdLog.exit(e)

        try:
            tdSql.execute("select * from db.st")
        except Exception as e:
            if e.args[0] != 'mnode invalid table name':
                tdLog.exit(e)

        try:
            tdSql.execute("select * from db.tb")
        except Exception as e:
            if e.args[0] != 'mnode invalid table name':
                tdLog.exit(e)
        tdSql.error("select * from db.st")
        tdSql.error("select * from db.tb")

    def stop(self):
        tdSql.close()

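tdSql.error, used above in place of the hand-rolled try/except blocks, runs a statement that is expected to be rejected and fails the case if it unexpectedly succeeds (its definition appears later in this diff). A short sketch of the pattern; the second statement is only a hypothetical example of something the server should refuse:

tdSql.error("select * from db.st")
tdSql.error("select * from not_a_table")
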
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,500 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
from util.log import *
|
||||
|
||||
|
||||
class TDSimClient:
|
||||
def __init__(self):
|
||||
self.testCluster = False
|
||||
|
||||
self.cfgDict = {
|
||||
"numOfLogLines": "100000000",
|
||||
"numOfThreadsPerCore": "2.0",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"anyIp": "0",
|
||||
"sdbDebugFlag": "135",
|
||||
"rpcDebugFlag": "135",
|
||||
"tmrDebugFlag": "131",
|
||||
"cDebugFlag": "135",
|
||||
"udebugFlag": "135",
|
||||
"jnidebugFlag": "135",
|
||||
"qdebugFlag": "135",
|
||||
}
|
||||
|
||||
def init(self, path):
|
||||
self.__init__()
|
||||
self.path = path
|
||||
|
||||
def getLogDir(self):
|
||||
self.logDir = "%s/sim/psim/log" % (self.path)
|
||||
return self.logDir
|
||||
|
||||
def getCfgDir(self):
|
||||
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
|
||||
return self.cfgDir
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = "%s/sim/psim/log" % (self.path)
|
||||
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
|
||||
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("logDir", self.logDir)
|
||||
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
|
||||
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||
|
||||
|
||||
class TDDnode:
|
||||
def __init__(self, index):
|
||||
self.index = index
|
||||
self.running = 0
|
||||
self.deployed = 0
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
|
||||
def init(self, path):
|
||||
self.path = path
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
if (self.deployed == 1):
|
||||
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||
for f in filenames:
|
||||
fp = os.path.join(dirpath, f)
|
||||
|
||||
if not os.path.islink(fp):
|
||||
totalSize = totalSize + os.path.getsize(fp)
|
||||
|
||||
return totalSize
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
|
||||
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
|
||||
self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
|
||||
self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
|
||||
self.path, self.index)
|
||||
|
||||
cmd = "rm -rf " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.startIP()
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("dataDir", self.dataDir)
|
||||
self.cfg("logDir", self.logDir)
|
||||
self.cfg("numOfLogLines", "100000000")
|
||||
self.cfg("mnodeEqualVnodeNum", "0")
|
||||
self.cfg("walLevel", "1")
|
||||
self.cfg("statusInterval", "1")
|
||||
self.cfg("numOfTotalVnodes", "64")
|
||||
self.cfg("numOfMnodes", "3")
|
||||
self.cfg("numOfThreadsPerCore", "2.0")
|
||||
self.cfg("monitor", "0")
|
||||
self.cfg("maxVnodeConnections", "30000")
|
||||
self.cfg("maxMgmtConnections", "30000")
|
||||
self.cfg("maxMeterConnections", "30000")
|
||||
self.cfg("maxShellConns", "30000")
|
||||
self.cfg("locale", "en_US.UTF-8")
|
||||
self.cfg("charset", "UTF-8")
|
||||
self.cfg("asyncLog", "0")
|
||||
self.cfg("anyIp", "0")
|
||||
self.cfg("dDebugFlag", "135")
|
||||
self.cfg("mDebugFlag", "135")
|
||||
self.cfg("sdbDebugFlag", "135")
|
||||
self.cfg("rpcDebugFlag", "135")
|
||||
self.cfg("tmrDebugFlag", "131")
|
||||
self.cfg("cDebugFlag", "135")
|
||||
self.cfg("httpDebugFlag", "135")
|
||||
self.cfg("monitorDebugFlag", "135")
|
||||
self.cfg("udebugFlag", "135")
|
||||
self.cfg("jnidebugFlag", "135")
|
||||
self.cfg("qdebugFlag", "135")
|
||||
self.deployed = 1
|
||||
tdLog.debug(
|
||||
"dnode:%d is deployed and configured by %s" %
|
||||
(self.index, self.cfgPath))
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def start(self):
|
||||
buildPath = self.getBuildPath()
|
||||
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
|
||||
binPath = buildPath + "/build/bin/taosd"
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||
|
||||
cmd = "nohup %s %s -c %s --random-file-fail-factor 5 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
|
||||
print(cmd)
|
||||
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
|
||||
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||
time.sleep(5)
|
||||
|
||||
def stop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||
|
||||
def startIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def stopIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||
self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def getDnodeRootDir(self, index):
|
||||
dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
|
||||
return dnodeRootDir
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = "%s/sim/psim" % (self.path)
|
||||
return dnodesRootDir
|
||||
|
||||
|
||||
class TDDnodes:
|
||||
def __init__(self):
|
||||
self.dnodes = []
|
||||
self.dnodes.append(TDDnode(1))
|
||||
self.dnodes.append(TDDnode(2))
|
||||
self.dnodes.append(TDDnode(3))
|
||||
self.dnodes.append(TDDnode(4))
|
||||
self.dnodes.append(TDDnode(5))
|
||||
self.dnodes.append(TDDnode(6))
|
||||
self.dnodes.append(TDDnode(7))
|
||||
self.dnodes.append(TDDnode(8))
|
||||
self.dnodes.append(TDDnode(9))
|
||||
self.dnodes.append(TDDnode(10))
|
||||
self.simDeployed = False
|
||||
|
||||
def init(self, path):
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
binPath = os.path.dirname(os.path.realpath(__file__))
|
||||
binPath = binPath + "/../../../debug/"
|
||||
tdLog.debug("binPath %s" % (binPath))
|
||||
binPath = os.path.realpath(binPath)
|
||||
tdLog.debug("binPath real path %s" % (binPath))
|
||||
|
||||
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
|
||||
# tdLog.debug(cmd)
|
||||
# os.system(cmd)
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
if path == "":
|
||||
# self.path = os.path.expanduser('~')
|
||||
self.path = os.path.abspath(binPath + "../../")
|
||||
else:
|
||||
self.path = os.path.realpath(path)
|
||||
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].init(self.path)
|
||||
|
||||
self.sim = TDSimClient()
|
||||
self.sim.init(self.path)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def deploy(self, index):
|
||||
self.sim.setTestCluster(self.testCluster)
|
||||
|
||||
if (self.simDeployed == False):
|
||||
self.sim.deploy()
|
||||
self.simDeployed = True
|
||||
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||
self.dnodes[index - 1].deploy()
|
||||
|
||||
def cfg(self, index, option, value):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].cfg(option, value)
|
||||
|
||||
def start(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].start()
|
||||
|
||||
def stop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].stop()
|
||||
|
||||
def getDataSize(self, index):
|
||||
self.check(index)
|
||||
return self.dnodes[index - 1].getDataSize()
|
||||
|
||||
def forcestop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].forcestop()
|
||||
|
||||
def startIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.testCluster:
|
||||
self.dnodes[index - 1].startIP()
|
||||
|
||||
def stopIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.dnodes[index - 1].testCluster:
|
||||
self.dnodes[index - 1].stopIP()
|
||||
|
||||
def check(self, index):
|
||||
if index < 1 or index > 10:
|
||||
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||
|
||||
def stopAll(self):
|
||||
tdLog.info("stop all dnodes")
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].stop()
|
||||
|
||||
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
if processID:
|
||||
cmd = "sudo systemctl stop taosd"
|
||||
os.system(cmd)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = "%s/sim" % (self.path)
|
||||
return dnodesRootDir
|
||||
|
||||
def getSimCfgPath(self):
|
||||
return self.sim.getCfgDir()
|
||||
|
||||
def getSimLogPath(self):
|
||||
return self.sim.getLogDir()
|
||||
|
||||
def addSimExtraCfg(self, option, value):
|
||||
self.sim.addExtraCfg(option, value)
|
||||
|
||||
|
||||
tdDnodes = TDDnodes()
|
|
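A typical use of the TDDnodes helper defined above, mirroring what the added cases in this diff do before importing data: tear dnode 1 down, redeploy a clean data/log/cfg tree, and start it again. A minimal sketch, assuming the tests/pytest directory is on PYTHONPATH:

from util.dnodes import tdDnodes

tdDnodes.init("")      # resolve the sim directories relative to the build tree
tdDnodes.stop(1)
tdDnodes.deploy(1)     # wipe dnode1's data/log/cfg and rewrite taos.cfg
tdDnodes.start(1)
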
@ -22,35 +22,59 @@ class TDSimClient:
|
|||
def __init__(self):
|
||||
self.testCluster = False
|
||||
|
||||
self.cfgDict = {
|
||||
"numOfLogLines": "100000000",
|
||||
"numOfThreadsPerCore": "2.0",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"maxTablesPerVnode": "4",
|
||||
"maxVgroupsPerDb": "1000",
|
||||
"sdbDebugFlag": "143",
|
||||
"rpcDebugFlag": "135",
|
||||
"tmrDebugFlag": "131",
|
||||
"cDebugFlag": "135",
|
||||
"udebugFlag": "135",
|
||||
"jnidebugFlag": "135",
|
||||
"qdebugFlag": "135",
|
||||
}
|
||||
def init(self, path):
|
||||
self.__init__()
|
||||
self.path = path
|
||||
|
||||
def getLogDir(self):
|
||||
self.logDir = "%s/sim/psim/log" % (self.path)
|
||||
return self.logDir
|
||||
|
||||
def getCfgDir(self):
|
||||
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
|
||||
return self.cfgDir
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = "%s/sim/psim/log" % (self.path,)
|
||||
self.logDir = "%s/sim/psim/log" % (self.path)
|
||||
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
|
||||
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
|
@ -66,19 +90,10 @@ class TDSimClient:
|
|||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("logDir", self.logDir)
|
||||
self.cfg("numOfLogLines", "100000000")
|
||||
self.cfg("numOfThreadsPerCore", "2.0")
|
||||
self.cfg("locale", "en_US.UTF-8")
|
||||
self.cfg("charset", "UTF-8")
|
||||
self.cfg("asyncLog", "0")
|
||||
self.cfg("anyIp", "0")
|
||||
self.cfg("sdbDebugFlag", "135")
|
||||
self.cfg("rpcDebugFlag", "135")
|
||||
self.cfg("tmrDebugFlag", "131")
|
||||
self.cfg("cDebugFlag", "135")
|
||||
self.cfg("udebugFlag", "135")
|
||||
self.cfg("jnidebugFlag", "135")
|
||||
self.cfg("qdebugFlag", "135")
|
||||
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
|
||||
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||
|
||||
|
||||
|
@ -378,6 +393,9 @@ class TDDnodes:
|
|||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].init(self.path)
|
||||
|
||||
self.sim = TDSimClient()
|
||||
self.sim.init(self.path)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
|
@ -385,8 +403,6 @@ class TDDnodes:
|
|||
self.valgrind = value
|
||||
|
||||
def deploy(self, index):
|
||||
self.sim = TDSimClient()
|
||||
self.sim.init(self.path)
|
||||
self.sim.setTestCluster(self.testCluster)
|
||||
|
||||
if (self.simDeployed == False):
|
||||
|
@ -474,5 +490,11 @@ class TDDnodes:
|
|||
def getSimCfgPath(self):
|
||||
return self.sim.getCfgDir()
|
||||
|
||||
def getSimLogPath(self):
|
||||
return self.sim.getLogDir()
|
||||
|
||||
def addSimExtraCfg(self, option, value):
|
||||
self.sim.addExtraCfg(option, value)
|
||||
|
||||
|
||||
tdDnodes = TDDnodes()
|
||||
|
|
|
@ -29,10 +29,8 @@ class TDSql:
|
|||
self.cursor = cursor
|
||||
|
||||
if (log):
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
self.cursor.log(callerFilename + ".sql")
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
self.cursor.log(caller.filename + ".sql")
|
||||
|
||||
def close(self):
|
||||
self.cursor.close()
|
||||
|
@ -55,12 +53,8 @@ class TDSql:
|
|||
except BaseException:
|
||||
expectErrNotOccured = False
|
||||
if expectErrNotOccured:
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, expect error not occured" %
|
||||
(callerFilename, sql))
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
|
||||
else:
|
||||
self.queryRows = 0
|
||||
self.queryCols = 0
|
||||
|
@ -69,75 +63,70 @@ class TDSql:
|
|||
|
||||
def query(self, sql):
|
||||
self.sql = sql
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
self.queryRows = len(self.queryResult)
|
||||
self.queryCols = len(self.cursor.description)
|
||||
# if self.queryRows == 1 and self.queryCols == 1:
|
||||
# tdLog.info("sql:%s, rows:%d cols:%d data:%s" % (self.sql, self.queryRows, self.queryCols, self.queryResult[0][0]))
|
||||
# else:
|
||||
# tdLog.info("sql:%s, rows:%d cols:%d" % (self.sql, self.queryRows, self.queryCols))
|
||||
try:
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
self.queryRows = len(self.queryResult)
|
||||
self.queryCols = len(self.cursor.description)
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
return self.queryRows
|
||||
|
||||
def waitedQuery(self, sql, expectRows, timeout):
|
||||
tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout))
|
||||
self.sql = sql
|
||||
try:
|
||||
for i in range(timeout):
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
self.queryRows = len(self.queryResult)
|
||||
self.queryCols = len(self.cursor.description)
|
||||
if self.queryRows >= expectRows:
|
||||
return (self.queryRows, i)
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
return (self.queryRows, timeout)
|
||||
|
||||
def checkRows(self, expectRows):
|
||||
if self.queryRows != expectRows:
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, queryRows:%d != expect:%d" %
|
||||
(callerFilename, self.sql, self.queryRows, expectRows))
|
||||
tdLog.info("sql:%s, queryRows:%d == expect:%d" %
|
||||
(self.sql, self.queryRows, expectRows))
|
||||
if self.queryRows == expectRows:
|
||||
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
|
||||
|
||||
def checkRowCol(self, row, col):
|
||||
caller = inspect.getframeinfo(inspect.stack()[2][0])
|
||||
if row < 0:
|
||||
args = (caller.filename, caller.lineno, self.sql, row)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
|
||||
if col < 0:
|
||||
args = (caller.filename, caller.lineno, self.sql, row)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
|
||||
if row > self.queryRows:
|
||||
args = (caller.filename, caller.lineno, self.sql, row, self.queryRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
|
||||
if col > self.queryCols:
|
||||
args = (caller.filename, caller.lineno, self.sql, col, self.queryCols)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
|
||||
|
||||
def checkDataType(self, row, col, dataType):
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
|
||||
if row < 0:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, row:%d is smaller than zero" %
|
||||
(callerFilename, self.sql, row))
|
||||
if col < 0:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, col:%d is smaller than zero" %
|
||||
(callerFilename, self.sql, col))
|
||||
if row > self.queryRows:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, row:%d is larger than queryRows:%d" %
|
||||
(callerFilename, self.sql, row, self.queryRows))
|
||||
if col > self.queryCols:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, col:%d is larger than queryCols:%d" %
|
||||
(callerFilename, self.sql, col, self.queryCols))
|
||||
|
||||
self.checkRowCol(row, col)
|
||||
return self.cursor.istype(col, dataType)
|
||||
|
||||
def checkData(self, row, col, data):
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
|
||||
if row < 0:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, row:%d is smaller than zero" %
|
||||
(callerFilename, self.sql, row))
|
||||
if col < 0:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, col:%d is smaller than zero" %
|
||||
(callerFilename, self.sql, col))
|
||||
if row > self.queryRows:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, row:%d is larger than queryRows:%d" %
|
||||
(callerFilename, self.sql, row, self.queryRows))
|
||||
if col > self.queryCols:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, col:%d is larger than queryCols:%d" %
|
||||
(callerFilename, self.sql, col, self.queryCols))
|
||||
self.checkRowCol(row, col)
|
||||
if self.queryResult[row][col] != data:
|
||||
tdLog.exit("%s failed: sql:%s row:%d col:%d data:%s != expect:%s" % (
|
||||
callerFilename, self.sql, row, col, self.queryResult[row][col], data))
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
|
||||
if data is None:
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
|
@ -153,26 +142,7 @@ class TDSql:
|
|||
(self.sql, row, col, self.queryResult[row][col], data))
|
||||
|
||||
def getData(self, row, col):
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
|
||||
if row < 0:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, row:%d is smaller than zero" %
|
||||
(callerFilename, self.sql, row))
|
||||
if col < 0:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, col:%d is smaller than zero" %
|
||||
(callerFilename, self.sql, col))
|
||||
if row > self.queryRows:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, row:%d is larger than queryRows:%d" %
|
||||
(callerFilename, self.sql, row, self.queryRows))
|
||||
if col > self.queryCols:
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, col:%d is larger than queryCols:%d" %
|
||||
(callerFilename, self.sql, col, self.queryCols))
|
||||
self.checkRowCol(row, col)
|
||||
return self.queryResult[row][col]
|
||||
|
||||
def executeTimes(self, sql, times):
|
||||
|
@ -185,20 +155,22 @@ class TDSql:
|
|||
|
||||
def execute(self, sql):
|
||||
self.sql = sql
|
||||
self.affectedRows = self.cursor.execute(sql)
|
||||
try:
|
||||
self.affectedRows = self.cursor.execute(sql)
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
return self.affectedRows
|
||||
|
||||
def checkAffectedRows(self, expectAffectedRows):
|
||||
if self.affectedRows != expectAffectedRows:
|
||||
frame = inspect.stack()[1]
|
||||
callerModule = inspect.getmodule(frame[0])
|
||||
callerFilename = callerModule.__file__
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args)
|
||||
|
||||
tdLog.exit(
|
||||
"%s failed: sql:%s, affectedRows:%d != expect:%d" %
|
||||
(callerFilename, self.sql, self.affectedRows, expectAffectedRows))
|
||||
tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
|
||||
(self.sql, self.affectedRows, expectAffectedRows))
|
||||
tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows))
|
||||
|
||||
|
||||
tdSql = TDSql()
|
||||
|
|
|
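With the try/except added to query and execute above, a failing statement is now logged with the caller's file name and line number before the exception is re-raised, so a case can still decide whether the failure is fatal. A minimal sketch, assuming the same framework (the table name is a placeholder):

try:
    tdSql.query("select * from not_a_table")
except Exception as e:
    tdLog.info("query failed as expected: %s" % repr(e))
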
@ -2,7 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000

system sh/exec.sh -n dnode1 -s start

@ -10,7 +10,7 @@ sleep 3000
sql connect
print ============================ dnode1 start

sql create database db maxTables 500 cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql create database db cache 2 blocks 4 days 10 keep 20 minRows 300 maxRows 400 ctime 120 precision 'ms' comp 2 wal 1 replica 1
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data00 != db then

@ -31,13 +31,10 @@ endi
if $data06 != 20,20,20 then
return -1
endi
if $data07 != 500 then
if $data07 != 2 then
return -1
endi
if $data08 != 2 then
return -1
endi
if $data09 != 4 then
if $data08 != 4 then
return -1
endi

@ -46,7 +43,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
return
sql_error alter database db cache 256
sql_error alter database db blocks 1
sql_error alter database db maxTables 10
sql_error alter database db days 10
sql_error alter database db keep 10
sql_error alter database db minRows 350

@ -59,7 +55,6 @@ sql_error alter database db replica 2

print ============== step3
sql alter database db maxTables 1000
sql alter database db comp 1
sql alter database db blocks 40
sql alter database db keep 30
@ -1,11 +1,13 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 5
|
||||
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/cfg.sh -n dnode2 -c wallevel -v 2
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 2
|
||||
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 4
|
||||
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 5
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
|
@ -17,7 +19,7 @@ sleep 1000
|
|||
|
||||
print ============================ step1
|
||||
|
||||
sql create database db maxTables 5
|
||||
sql create database db
|
||||
sql create table db.st (ts timestamp, i int) tags(t int)
|
||||
sql create table db.t000 using db.st tags(0)
|
||||
sql create table db.t001 using db.st tags(1)
|
||||
|
@ -74,9 +76,14 @@ if $rows != 20 then
|
|||
endi
|
||||
|
||||
print ============================ step3
|
||||
|
||||
sql alter database db maxTables 10
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
|
||||
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 10
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t100 using db.st tags(0)
|
||||
sql create table db.t101 using db.st tags(1)
|
||||
|
@ -133,9 +140,14 @@ if $rows != 40 then
|
|||
endi
|
||||
|
||||
print ============================ step5
|
||||
|
||||
sql alter database db maxTables 15
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 15
|
||||
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 15
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t200 using db.st tags(0)
|
||||
sql create table db.t201 using db.st tags(1)
|
||||
|
@ -252,9 +264,14 @@ if $rows != 60 then
|
|||
endi
|
||||
|
||||
print ============================ step9
|
||||
|
||||
sql alter database db maxTables 20
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
|
||||
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t300 using db.st tags(0)
|
||||
sql create table db.t301 using db.st tags(1)
|
||||
|
@ -380,9 +397,14 @@ if $rows != 80 then
|
|||
endi
|
||||
|
||||
print ============================ step9
|
||||
|
||||
sql alter database db maxTables 25
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 25
|
||||
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 25
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t400 using db.st tags(0)
|
||||
sql create table db.t401 using db.st tags(1)
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
|
@ -11,7 +11,7 @@ sql connect
|
|||
|
||||
print ============================ step1
|
||||
|
||||
sql create database db maxTables 10
|
||||
sql create database db
|
||||
sql create table db.st (ts timestamp, i int) tags(t int)
|
||||
sql create table db.t0 using db.st tags(0)
|
||||
sql create table db.t1 using db.st tags(1)
|
||||
|
@ -49,8 +49,11 @@ endi
|
|||
|
||||
print ============================ step3
|
||||
|
||||
sql alter database db maxTables 20
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t10 using db.st tags(0)
|
||||
sql create table db.t11 using db.st tags(1)
|
||||
|
@ -86,9 +89,11 @@ if $rows != 20 then
|
|||
endi
|
||||
|
||||
print ============================ step5
|
||||
|
||||
sql alter database db maxTables 30
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 30
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t20 using db.st tags(0)
|
||||
sql create table db.t21 using db.st tags(1)
|
||||
|
@ -183,9 +188,11 @@ if $rows != 30 then
|
|||
endi
|
||||
|
||||
print ============================ step9
|
||||
|
||||
sql alter database db maxTables 40
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 40
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t30 using db.st tags(0)
|
||||
sql create table db.t31 using db.st tags(1)
|
||||
|
@ -285,9 +292,11 @@ if $rows != 40 then
|
|||
endi
|
||||
|
||||
print ============================ step12
|
||||
|
||||
sql alter database db maxTables 50
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 50
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t40 using db.st tags(0)
|
||||
sql create table db.t41 using db.st tags(1)
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 5
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
|
@ -11,7 +11,7 @@ sql connect
|
|||
|
||||
print ============================ step1
|
||||
|
||||
sql create database db maxTables 5
|
||||
sql create database db
|
||||
sql create table db.st (ts timestamp, i int) tags(t int)
|
||||
sql create table db.t000 using db.st tags(0)
|
||||
sql create table db.t001 using db.st tags(1)
|
||||
|
@ -68,9 +68,11 @@ if $rows != 20 then
|
|||
endi
|
||||
|
||||
print ============================ step3
|
||||
|
||||
sql alter database db maxTables 10
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 10
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t100 using db.st tags(0)
|
||||
sql create table db.t101 using db.st tags(1)
|
||||
|
@ -127,9 +129,11 @@ if $rows != 40 then
|
|||
endi
|
||||
|
||||
print ============================ step5
|
||||
|
||||
sql alter database db maxTables 15
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 15
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t200 using db.st tags(0)
|
||||
sql create table db.t201 using db.st tags(1)
|
||||
|
@ -244,9 +248,11 @@ if $rows != 60 then
|
|||
endi
|
||||
|
||||
print ============================ step9
|
||||
|
||||
sql alter database db maxTables 20
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
||||
sql create table db.t300 using db.st tags(0)
|
||||
sql create table db.t301 using db.st tags(1)
|
||||
|
@ -370,10 +376,11 @@ if $rows != 80 then
|
|||
endi
|
||||
|
||||
print ============================ step12
|
||||
|
||||
sql alter database db maxTables 25
|
||||
sleep 1000
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 25
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
sql create table db.t400 using db.st tags(0)
|
||||
sql create table db.t401 using db.st tags(1)
|
||||
sql create table db.t402 using db.st tags(2)
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
|
@ -11,7 +11,7 @@ sql connect
|
|||
|
||||
print ============================ step1
|
||||
|
||||
sql create database db maxTables 20
|
||||
sql create database db
|
||||
sql create table db.st (ts timestamp, i int) tags(t int)
|
||||
sql create table db.t000 using db.st tags(0)
|
||||
sql create table db.t001 using db.st tags(1)
|
||||
|
@ -69,7 +69,7 @@ endi
|
|||
|
||||
print ============================ step3
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 2
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
@ -131,7 +131,7 @@ endi
|
|||
print ============================ step5
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 3
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
|
||||
sleep 5000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 5000
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
|
@ -17,7 +17,7 @@ $db = $dbPrefix . $i
|
|||
$tb = $tbPrefix . $i
|
||||
|
||||
print =============== step1
|
||||
sql create database $db replica 1 days 20 keep 2000
|
||||
sql create database $db replica 1 days 20 keep 2000 cache 16
|
||||
sql show databases
|
||||
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
|
||||
if $data00 != $db then
|
||||
|
@ -35,7 +35,7 @@ endi
|
|||
if $data05 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data07 != 1000 then
|
||||
if $data07 != 16 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -76,13 +76,6 @@ if $data05 != 15 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
#if $data06 != 1500,15000,1500 then
|
||||
# return -1
|
||||
#endi
|
||||
if $data07 != 1000 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step6
|
||||
sql use $db
|
||||
sql create table $tb (ts timestamp, speed int)
|
||||
|
|
|
@ -2,6 +2,8 @@ system sh/stop_dnodes.sh
|
|||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 10
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
|
||||
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
@ -9,7 +11,7 @@ sleep 3000
|
|||
sql connect
|
||||
|
||||
print ======== step1
|
||||
sql create database db blocks 2 maxtables 1000
|
||||
sql create database db blocks 2
|
||||
sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int)
|
||||
|
||||
$tbPrefix = db.t
|
||||
|
@ -21,7 +23,7 @@ while $i < 2000
|
|||
endw
|
||||
|
||||
sql show db.vgroups
|
||||
if $rows != 2 then
|
||||
if $rows != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -2,15 +2,15 @@ system sh/stop_dnodes.sh
|
|||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000
|
||||
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
print =============== step2
|
||||
sql create database db maxtables 4
|
||||
sql create database db
|
||||
sql show databases
|
||||
print $rows $data07
|
||||
|
||||
|
@ -18,10 +18,6 @@ if $rows != 1 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data07 != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step3
|
||||
sql use db
|
||||
sql create table t1 (ts timestamp, i int)
|
||||
|
@ -78,7 +74,7 @@ sql reset query cache
|
|||
sleep 4000
|
||||
|
||||
print =============== step7
|
||||
sql create database db maxtables 4
|
||||
sql create database db
|
||||
sql show databases
|
||||
print $rows $data07
|
||||
|
||||
|
@ -86,10 +82,6 @@ if $rows != 1 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data07 != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step8
|
||||
sql use db
|
||||
sql create table t1 (ts timestamp, i int)
|
||||
|
|
|
@ -6,7 +6,8 @@ $totalRows = $totalVnodes * $maxTables

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v $maxTables
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v $totalVnodes
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes
system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000
system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000

@ -17,7 +18,7 @@ print ========== prepare data
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
sql create database db blocks 2 cache 1 maxTables $maxTables
sql create database db blocks 2 cache 1
sql use db

print ========== step1
@ -81,7 +81,7 @@ print =============== step2 - no db
#11
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/sql
print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","maxtables","cache(MB)","blocks","minrows","maxrows","ctime(Sec.)","wallevel","comp","precision","status"],"data":[],"rows":0}@ then
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","comp","precision","status"],"data":[],"rows":0}@ then
return -1
endi
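For reference, a rough Python equivalent of the curl call in this hunk. It is a sketch only: it assumes a locally running taosd with the REST service on port 6020 and reuses the Authorization token shown above; the assertion mirrors the updated expectation that the maxtables and ctime(Sec.) columns are gone from the header.

# Hedged sketch of the same REST check the $system_content comparison performs.
import json
import urllib.request

req = urllib.request.Request(
    "http://127.0.0.1:6020/rest/sql",
    data=b"show databases",
    headers={"Authorization": "Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"},
)
with urllib.request.urlopen(req) as resp:
    reply = json.loads(resp.read().decode("utf-8"))

assert reply["status"] == "succ"
assert "maxtables" not in reply["head"]   # column removed by this change
print(reply["rows"], reply["head"])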
@ -128,12 +128,12 @@ endi
if $data06 != 365,365,365 then
return -1
endi
print data08 = $data08
if $data08 != $cache then
print expect $cache, actual:$data08
print data07 = $data07
if $data07 != $cache then
print expect $cache, actual:$data07
return -1
endi
if $data09 != 4 then
if $data08 != 4 then
return -1
endi
@ -2,6 +2,7 @@ system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
@ -63,6 +63,11 @@ if $data41 != 9 then
endi

sql select * from $stb order by ts asc limit 5
print select * from $stb order by ts asc limit 5
print $data00 $data01
print $data10 $data11
print $data20 $data21

if $rows != 5 then
return -1
endi

@ -356,6 +356,11 @@ if $rows != 0 then
return -1
endi
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01
print $data10 $data11
print $data20 $data21

if $rows != 3 then
return -1
endi

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
@ -1,212 +0,0 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 5
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
sql connect
|
||||
print ======================== stream.sim
|
||||
sleep 2000
|
||||
$db = strm_db
|
||||
$tb = tb
|
||||
$mt = mt
|
||||
$strm = strm
|
||||
$tbNum = 10
|
||||
$rowNum = 20
|
||||
$totalNum = 200
|
||||
|
||||
print =============== set up DB
|
||||
$i = 0
|
||||
|
||||
sql drop database if exists $db
|
||||
sql create database $db
|
||||
sql use $db
|
||||
|
||||
|
||||
## [TBASE300]
|
||||
print ====== TBASE-300
|
||||
sql create table mt (ts timestamp, c1 int, c2 int) tags(t1 int)
|
||||
sql create table tb1 using mt tags(1)
|
||||
sql create table tb2 using mt tags(2)
|
||||
sql create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s)
|
||||
sleep 10000
|
||||
sql insert into tb2 values(now,1,1)
|
||||
sql insert into tb1 values(now,1,1)
|
||||
sleep 4000
|
||||
sql select * from mt
|
||||
sql select * from strm
|
||||
sql drop table tb1
|
||||
sleep 100000
|
||||
sql select * from strm
|
||||
if $rows != 2 then
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
endi
|
||||
sql drop table tb2
|
||||
sql drop table mt
|
||||
sql drop table strm
|
||||
|
||||
## [TBASE304]
|
||||
print ====== TBASE-304
|
||||
sleep 10000
|
||||
# we cannot reset query cache in server side, as a workaround,
|
||||
# set super table name to mt304, need to change back to mt later
|
||||
print create mt304
|
||||
sql create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int)
|
||||
print create tb1
|
||||
sql create table tb1 using mt304 tags(1, 1)
|
||||
print create tb2
|
||||
sql create table tb2 using mt304 tags(1, -1)
|
||||
print create strm
|
||||
sql create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s)
|
||||
sql insert into tb1 values (now,1)
|
||||
sql insert into tb2 values (now,2)
|
||||
sleep 100000
|
||||
sql select * from strm;
|
||||
if $rows != 2 then
|
||||
print ==== expect rows = 2, actually returned rows = $rows
|
||||
return -1
|
||||
endi
|
||||
if $data01 != 1 then
|
||||
return -1
|
||||
endi
|
||||
print data02 = $data02
|
||||
if $data02 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
sql alter table mt304 drop tag t2;
|
||||
sql insert into tb2 values (now,2);
|
||||
sql insert into tb1 values (now,1);
|
||||
sql select * from strm;
|
||||
sql alter table mt304 add tag t2 int;
|
||||
sleep 10000
|
||||
sql select * from strm
|
||||
|
||||
print ================= create a stream with a wildcard filter on tags of a STable
|
||||
sql drop database $db
|
||||
sql create database $db
|
||||
sql use $db
|
||||
sql create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10))
|
||||
sql create table tb1 using stb tags('a1')
|
||||
sql create table tb2 using stb tags('b2')
|
||||
sql create table tb3 using stb tags('a3')
|
||||
sql create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s)
|
||||
sleep 11000
|
||||
sql insert into tb1 values (now, 0, 'tb1')
|
||||
sleep 4000
|
||||
sql insert into tb2 values (now, 2, 'tb2')
|
||||
sleep 4000
|
||||
sql insert into tb3 values (now, 0, 'tb3')
|
||||
sleep 60000
|
||||
|
||||
sql describe strm
|
||||
if $rows == 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from strm
|
||||
sleep 1000
|
||||
print ======== data0: $data00 $data01 $data02 $data03
|
||||
print ======== data1: $data10 $data11 $data12 $data13
|
||||
print ======== data2: $data20 $data21 $data22 $data23
|
||||
print ======== data3: $data30 $data31 $data32 $data33
|
||||
if $rows != 4 then
|
||||
print ==== expect rows = 4, actually returned rows = $rows
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 0.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 == tb2 then
|
||||
return -1
|
||||
endi
|
||||
if $data13 == tb2 then
|
||||
return -1
|
||||
endi
|
||||
if $data23 == tb2 then
|
||||
return -1
|
||||
endi
|
||||
if $data33 == tb2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
## The vnode client needs to refresh metadata cache to allow strm calculate tb4's data. But the current refreshing frequency is every 10 min
|
||||
## commented out the case below to save running time
|
||||
sql create table tb4 using stb tags('a4')
|
||||
sql insert into tb4 values(now, 4, 'tb4')
|
||||
sleep 60000
|
||||
sql select * from strm order by ts desc
|
||||
print ======== data0: $data00 $data01 $data02 $data03
|
||||
#print ======== data1: $data10 $data11 $data12 $data13
|
||||
#print ======== data2: $data20 $data21 $data22 $data23
|
||||
#print ======== data3: $data30 $data31 $data32 $data33
|
||||
if $rows != 6 then
|
||||
print ==== expect rows = 6, actually returned rows = $rows
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 4.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != tb4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== change tag values to see if stream still works correctly
|
||||
sql alter table tb4 set tag t1='b4'
|
||||
sleep 3000 # waiting for new tag valid
|
||||
sql insert into tb1 values (now, 1, 'tb1_a1')
|
||||
sleep 4000
|
||||
sql insert into tb4 values (now, -4, 'tb4_b4')
|
||||
sleep 100000
|
||||
sql select * from strm order by ts desc
|
||||
sleep 1000
|
||||
print ======== data0: $data00 $data01 $data02 $data03
|
||||
#print ======== data1: $data10 $data11 $data12 $data13
|
||||
#print ======== data2: $data20 $data21 $data22 $data23
|
||||
#print ======== data3: $data30 $data31 $data32 $data33
|
||||
if $rows != 8 then
|
||||
print ==== expect rows = 8, actually returned rows = $rows
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 1.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != tb1_a1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql drop database if exists $db
|
||||
sql drop database if exists strm_db_0
|
||||
sql create database strm_db_0
|
||||
sql use strm_db_0
|
||||
|
||||
sql create table stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15))
|
||||
sql create table tb0 using stb tags(0, 'tb0')
|
||||
sql create table tb1 using stb tags(1, 'tb1')
|
||||
sql create table tb2 using stb tags(2, 'tb2')
|
||||
sql create table tb3 using stb tags(3, 'tb3')
|
||||
sql create table tb4 using stb tags(4, 'tb4')
|
||||
|
||||
sql create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb where ts < now + 30s interval(4s) sliding(2s)
|
||||
sleep 1000
|
||||
sql insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true)
|
||||
sleep 30000
|
||||
sql select * from strm0 order by ts desc
|
||||
sleep 1000
|
||||
if $rows != 2 then
|
||||
print ==== expect rows = 2, actually returned rows = $rows
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true)
|
||||
sleep 30000
|
||||
sql select * from strm0 order by ts desc
|
||||
sleep 10000
|
||||
if $rows == 4 then
|
||||
print ==== actually returned rows = $rows, expect always not equal to 4
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -1,287 +0,0 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
print ======================== dnode1 start
|
||||
|
||||
$dbPrefix = m1_db
|
||||
$tbPrefix = m1_tb
|
||||
$mtPrefix = m1_mt
|
||||
$stPrefix = m1_st
|
||||
$tbNum = 10
|
||||
$rowNum = 20
|
||||
$totalNum = 200
|
||||
|
||||
print =============== step1
|
||||
$i = 0
|
||||
$db = $dbPrefix . $i
|
||||
$mt = $mtPrefix . $i
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql drop databae $db -x step1
|
||||
step1:
|
||||
sql create database $db
|
||||
sql use $db
|
||||
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
|
||||
|
||||
$i = 0
|
||||
while $i < $tbNum
|
||||
$tb = $tbPrefix . $i
|
||||
sql create table $tb using $mt tags( $i )
|
||||
|
||||
$x = -1440
|
||||
$y = 0
|
||||
while $y < $rowNum
|
||||
$ms = $x . m
|
||||
sql insert into $tb values (now $ms , $y , $y )
|
||||
$x = $x + 1
|
||||
$y = $y + 1
|
||||
endw
|
||||
|
||||
$i = $i + 1
|
||||
endw
|
||||
|
||||
sleep 100
|
||||
|
||||
print =============== step2 c1
|
||||
$i = 0
|
||||
$mt = $mtPrefix . $i
|
||||
|
||||
sql select count(*) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . c1
|
||||
sql create table $st as select count(*) from $mt interval(1d)
|
||||
|
||||
print =============== step3 c2
|
||||
sql select count(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . c2
|
||||
sql create table $st as select count(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step4 c3
|
||||
sql select count(tbcol2) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . c3
|
||||
sql create table $st as select count(tbcol2) from $mt interval(1d)
|
||||
|
||||
print =============== step5 avg
|
||||
sql select avg(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 9.500000000 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . av
|
||||
print create table $st as select avg(tbcol) from $mt interval(1d)
|
||||
sql create table $st as select avg(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step6 su
|
||||
sql select sum(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 1900 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . su
|
||||
sql create table $st as select sum(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step7 mi
|
||||
sql select min(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . mi
|
||||
sql create table $st as select min(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step8 ma
|
||||
sql select max(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 19 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . ma
|
||||
sql create table $st as select max(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step9 fi
|
||||
sql select first(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . fi
|
||||
sql create table $st as select first(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step10 la
|
||||
sql select last(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 19 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . la
|
||||
sql create table $st as select last(tbcol) from $mt interval(1d)
|
||||
|
||||
print =============== step11 st
|
||||
sql select stddev(tbcol) from $mt interval(1d) -x step11
|
||||
return -1
|
||||
step11:
|
||||
|
||||
print =============== step12 le
|
||||
sql select leastsquares(tbcol, 1, 1) from $mt interval(1d) -x step12
|
||||
return -1
|
||||
step12:
|
||||
|
||||
print =============== step13
|
||||
sql select top(tbcol, 1) from $mt interval(1d)
|
||||
|
||||
print =============== step14
|
||||
|
||||
sql select bottom(tbcol, 1) from $mt interval(1d)
|
||||
|
||||
print =============== step15 pe
|
||||
|
||||
sql select percentile(tbcol, 1) from $mt interval(1d) -x step15
|
||||
return -1
|
||||
step15:
|
||||
|
||||
print =============== step16
|
||||
sql select diff(tbcol) from $mt interval(1d) -x step16
|
||||
return -1
|
||||
step16:
|
||||
|
||||
print =============== step17 wh
|
||||
sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . wh
|
||||
#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
|
||||
|
||||
print =============== step18 as
|
||||
sql select count(tbcol) from $mt interval(1d)
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . as
|
||||
sql create table $st as select count(tbcol) as c from $mt interval(1d)
|
||||
|
||||
print =============== step19 gb
|
||||
sql select count(tbcol) from $mt interval(1d) group by tgcol
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step20 x
|
||||
sql select count(tbcol) from $mt where ts < now + 4m interval(1d) group by tgcol
|
||||
print ===> $data00 $data01
|
||||
if $data01 != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step21
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
|
||||
print =============== step22
|
||||
$st = $stPrefix . c1
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . c2
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . c3
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . av
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 9.500000000 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . su
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 1900 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . mi
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . ma
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 19 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . fi
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
$st = $stPrefix . la
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 19 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
#$st = $stPrefix . wh
|
||||
#sql select * from $st
|
||||
#print ===> select * from $st ===> $data00 $data01
|
||||
#if $data01 != 200 then
|
||||
# return -1
|
||||
#endi
|
||||
|
||||
$st = $stPrefix . as
|
||||
sql select * from $st
|
||||
print ===> select * from $st ===> $data00 $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
|
@ -4,7 +4,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
system sh/exec.sh -n dnode1 -s start

sleep 3000
@ -1,106 +0,0 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 3000
|
||||
sql connect
|
||||
print ======================== dnode1 start
|
||||
|
||||
$dbPrefix = ns_db
|
||||
$tbPrefix = ns_tb
|
||||
$mtPrefix = ns_mt
|
||||
$stPrefix = ns_st
|
||||
$tbNum = 5
|
||||
$rowNum = 200
|
||||
$totalNum = 200
|
||||
|
||||
print =============== step1
|
||||
|
||||
$i = 0
|
||||
$db = $dbPrefix
|
||||
$mt = $mtPrefix
|
||||
$st = $stPrefix
|
||||
|
||||
sql create database $db
|
||||
sql use $db
|
||||
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
|
||||
|
||||
$i = 0
|
||||
while $i < $tbNum
|
||||
$tb = $tbPrefix . $i
|
||||
sql create table $tb using $mt tags( $i )
|
||||
|
||||
$x = 0
|
||||
$y = 0
|
||||
while $y < $rowNum
|
||||
$ms = $x . s
|
||||
sql insert into $tb values (now + $ms , $y , $y )
|
||||
$x = $x + 1
|
||||
$y = $y + 1
|
||||
endw
|
||||
|
||||
$i = $i + 1
|
||||
endw
|
||||
|
||||
sleep 100
|
||||
|
||||
print =============== step2
|
||||
|
||||
sql select count(*), count(tbcol), count(tbcol2) from $mt interval(10s)
|
||||
print $data00 $data01 $data02 $data03
|
||||
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(10s)
|
||||
|
||||
print =============== step3
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
|
||||
print =============== step4
|
||||
|
||||
sql select * from $st
|
||||
print $st ==> $rows1 $data00 $data01 $data02 $data03
|
||||
if $data03 >= 51 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step5
|
||||
|
||||
$tbNum = 10
|
||||
while $i < $tbNum
|
||||
$tb = $tbPrefix . $i
|
||||
sql create table $tb using $mt tags( $i )
|
||||
if $i == 0 then
|
||||
sleep 2000
|
||||
endi
|
||||
|
||||
$x = 0
|
||||
$y = 0
|
||||
while $y < $rowNum
|
||||
$ms = $x . s
|
||||
sql insert into $tb values (now + $ms , $y , $y )
|
||||
$x = $x + 1
|
||||
$y = $y + 1
|
||||
endw
|
||||
|
||||
$i = $i + 1
|
||||
endw
|
||||
|
||||
print =============== step6
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
|
||||
print =============== step7
|
||||
|
||||
sql select * from $st order by ts desc
|
||||
print $st ==> $rows1 $data00 $data01 $data02 $data03
|
||||
if $data03 <= 51 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -1,206 +0,0 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
print ======================== dnode1 start
|
||||
|
||||
$dbPrefix = s1_db
|
||||
$tbPrefix = s1_tb
|
||||
$mtPrefix = s1_mt
|
||||
$stPrefix = s1_st
|
||||
$tbNum = 10
|
||||
$rowNum = 20
|
||||
$totalNum = 200
|
||||
|
||||
print =============== step1
|
||||
$i = 0
|
||||
$db = $dbPrefix . $i
|
||||
$mt = $mtPrefix . $i
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql drop databae $db -x step1
|
||||
step1:
|
||||
sql create database $db
|
||||
sql use $db
|
||||
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
|
||||
|
||||
$i = 0
|
||||
while $i < $tbNum
|
||||
$tb = $tbPrefix . $i
|
||||
sql create table $tb using $mt tags( $i )
|
||||
|
||||
$x = -1440
|
||||
$y = 0
|
||||
while $y < $rowNum
|
||||
$ms = $x . m
|
||||
sql insert into $tb values (now $ms , $y , $y )
|
||||
$x = $x + 1
|
||||
$y = $y + 1
|
||||
endw
|
||||
|
||||
$i = $i + 1
|
||||
endw
|
||||
|
||||
sleep 100
|
||||
|
||||
print =============== step2
|
||||
$i = 0
|
||||
$tb = $tbPrefix . $i
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
|
||||
print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03
|
||||
if $data01 != $rowNum then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != $rowNum then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != $rowNum then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show tables
|
||||
if $rows != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
|
||||
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step3
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01
|
||||
if $data01 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step4
|
||||
sql drop table $st
|
||||
sql show tables
|
||||
if $rows != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step5
|
||||
sql select * from $st -x step4
|
||||
return -1
|
||||
step4:
|
||||
|
||||
print =============== step6
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step7
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01
|
||||
if $data01 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step8
|
||||
$i = 1
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
|
||||
print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
|
||||
|
||||
sql show tables
|
||||
if $rows != 12 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step9
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step10
|
||||
sql drop table $st
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step11
|
||||
sql select * from $st -x step10
|
||||
return -1
|
||||
step10:
|
||||
|
||||
print =============== step12
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
|
||||
|
||||
sql show tables
|
||||
if $rows != 12 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step13
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
|
@ -1,194 +0,0 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
print ======================== dnode1 start
|
||||
|
||||
$dbPrefix = s2_db
|
||||
$tbPrefix = s2_tb
|
||||
$mtPrefix = s2_mt
|
||||
$stPrefix = s2_st
|
||||
$tbNum = 10
|
||||
$rowNum = 20
|
||||
$totalNum = 200
|
||||
|
||||
print =============== step1
|
||||
$i = 0
|
||||
$db = $dbPrefix . $i
|
||||
$mt = $mtPrefix . $i
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql drop databae $db -x step1
|
||||
step1:
|
||||
sql create database $db
|
||||
sql use $db
|
||||
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
|
||||
|
||||
$i = 0
|
||||
while $i < $tbNum
|
||||
$tb = $tbPrefix . $i
|
||||
sql create table $tb using $mt tags( $i )
|
||||
|
||||
$x = -1440
|
||||
$y = 0
|
||||
while $y < $rowNum
|
||||
$ms = $x . m
|
||||
sql insert into $tb values (now $ms , $y , $y )
|
||||
$x = $x + 1
|
||||
$y = $y + 1
|
||||
endw
|
||||
|
||||
$i = $i + 1
|
||||
endw
|
||||
|
||||
sleep 100
|
||||
|
||||
print =============== step2
|
||||
$i = 0
|
||||
$tb = $tbPrefix . $i
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql select count(tbcol) from $tb interval(1d)
|
||||
print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 $data02, $data03
|
||||
if $data01 != $rowNum then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show tables
|
||||
if $rows != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table $st as select count(tbcol) from $tb interval(1d)
|
||||
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step3
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01
|
||||
if $data01 != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step4
|
||||
sql drop table $st
|
||||
sql show tables
|
||||
if $rows != 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step5
|
||||
sql select * from $st -x step4
|
||||
return -1
|
||||
step4:
|
||||
|
||||
print =============== step6
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step7
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01
|
||||
if $data01 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 20 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 20 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step8
|
||||
$i = 1
|
||||
$st = $stPrefix . $i
|
||||
|
||||
sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
|
||||
print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
|
||||
|
||||
sql show tables
|
||||
if $rows != 12 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step9
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01 $data02, $data03
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != 200 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step10
|
||||
sql drop table $st
|
||||
sql show tables
|
||||
if $rows != 11 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step11
|
||||
sql select * from $st -x step10
|
||||
return -1
|
||||
step10:
|
||||
|
||||
print =============== step12
|
||||
sql create table $st as select count(tbcol) from $mt interval(1d)
|
||||
|
||||
sql show tables
|
||||
if $rows != 12 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== step13
|
||||
print sleep 120 seconds
|
||||
sleep 120000
|
||||
sql select * from $st
|
||||
print select * from $st => $data01 $data02, $data03
|
||||
if $data01 != 200 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != null then
|
||||
return -1
|
||||
endi
|
||||
if $data03 != null then
|
||||
return -1
|
||||
endi
|
||||
|
|
@ -4,7 +4,8 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 3
system sh/exec.sh -n dnode1 -s start

sleep 3000
@ -1,9 +1,6 @@
run general/stream/stream_1.sim
run general/stream/stream_2.sim
run general/stream/stream_3.sim
run general/stream/stream_restart.sim
run general/stream/table_1.sim
run general/stream/metrics_1.sim
run general/stream/table_n.sim
run general/stream/metrics_n.sim
run general/stream/table_del.sim
@ -3,6 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 129
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 8
system sh/exec.sh -n dnode1 -s start

sleep 3000

@ -36,7 +37,7 @@ while $x > $y
endw

sql show vgroups
if $rows != 2 then
if $rows != 8 then
return -1
endi

@ -51,7 +52,7 @@ while $x > $y
endw

sql show vgroups
if $rows != 4 then
if $rows != 8 then
return -1
endi

@ -66,7 +67,7 @@ while $x > $y
endw

sql show vgroups
if $rows != 6 then
if $rows != 8 then
return -1
endi
@ -117,8 +117,6 @@ cd ../../../debug; make
./test.sh -f general/parser/import_commit3.sim
./test.sh -f general/parser/insert_tb.sim
./test.sh -f general/parser/first_last.sim
# dyh is processing this script
#./test.sh -f general/parser/import_file.sim
./test.sh -f general/parser/lastrow.sim
./test.sh -f general/parser/nchar.sim
./test.sh -f general/parser/null_char.sim

@ -145,7 +143,6 @@ cd ../../../debug; make
./test.sh -f general/parser/groupby.sim
./test.sh -f general/parser/set_tag_vals.sim
#./test.sh -f general/parser/sliding.sim
./test.sh -f general/parser/tags_dynamically_specifiy.sim
./test.sh -f general/parser/tags_filter.sim
./test.sh -f general/parser/slimit_alter_tags.sim
./test.sh -f general/parser/join.sim

@ -313,17 +310,12 @@ cd ../../../debug; make

# stream still has bugs
#./test.sh -f general/parser/stream_on_sys.sim
#./test.sh -f general/parser/stream.sim
#./test.sh -f general/parser/repeatStream.sim
#./test.sh -f general/stream/new_stream.sim

./test.sh -f general/stream/metrics_1.sim
./test.sh -f general/stream/metrics_del.sim
#./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
#./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_1.sim
./test.sh -f general/stream/stream_2.sim
./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim
./test.sh -f general/stream/table_1.sim

@ -332,7 +324,7 @@ cd ../../../debug; make
./test.sh -f general/stream/table_replica1_vnoden.sim

./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim

@ -366,6 +358,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim

./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim
./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim

@ -148,7 +148,6 @@ cd ../../../debug; make
./test.sh -f general/parser/binary_escapeCharacter.sim
./test.sh -f general/parser/bug.sim
#./test.sh -f general/parser/stream_on_sys.sim
./test.sh -f general/parser/stream.sim
./test.sh -f general/parser/repeatAlter.sim
#./test.sh -f general/parser/repeatStream.sim
@ -110,6 +110,7 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG
echo "serverPort ${NODE}" >> $TAOS_CFG
echo "dataDir $DATA_DIR" >> $TAOS_CFG
echo "logDir $LOG_DIR" >> $TAOS_CFG
echo "debugFlag 135" >> $TAOS_CFG
echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "dDebugFlag 135" >> $TAOS_CFG

@ -124,7 +125,6 @@ echo "mqttDebugFlag 131" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "udebugFlag 135" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "wdebugFlag 135" >> $TAOS_CFG

@ -133,11 +133,13 @@ echo "monitorInterval 1" >> $TAOS_CFG
echo "http 0" >> $TAOS_CFG
echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG
echo "numOfLogLines 10000000" >> $TAOS_CFG
echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG
echo "numOfLogLines 10000000" >> $TAOS_CFG
echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
echo "numOfTotalVnodes 4" >> $TAOS_CFG
echo "maxVgroupsPerDb 4" >> $TAOS_CFG
echo "maxTablesPerVnode 1000" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG
echo "numOfMnodes 1" >> $TAOS_CFG
echo "locale en_US.UTF-8" >> $TAOS_CFG
@ -0,0 +1,113 @@
#!/bin/bash

# if [ $# != 4 || $# != 5 ]; then
# echo "argument list need input : "
# echo " -n nodeName"
# echo " -s start/stop"
# echo " -c clear"
# exit 1
# fi

NODE_NAME=
EXEC_OPTON=
CLEAR_OPTION="false"
while getopts "n:s:u:x:ct" arg
do
  case $arg in
    n)
      NODE_NAME=$OPTARG
      ;;
    s)
      EXEC_OPTON=$OPTARG
      ;;
    c)
      CLEAR_OPTION="clear"
      ;;
    t)
      SHELL_OPTION="true"
      ;;
    u)
      USERS=$OPTARG
      ;;
    x)
      SIGNAL=$OPTARG
      ;;
    ?)
      echo "unkown argument"
      ;;
  esac
done

SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`

IN_TDINTERNAL="community"
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
  cd ../../..
else
  cd ../../
fi

TAOS_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`

if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
else
  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
fi

BUILD_DIR=$TAOS_DIR/$BIN_DIR/build

SIM_DIR=$TAOS_DIR/sim
NODE_DIR=$SIM_DIR/$NODE_NAME
EXE_DIR=$BUILD_DIR/bin
CFG_DIR=$NODE_DIR/cfg
LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb

TAOS_CFG=$NODE_DIR/cfg/taos.cfg

echo ------------ $EXEC_OPTON $NODE_NAME

TAOS_FLAG=$SIM_DIR/tsim/flag
if [ -f "$TAOS_FLAG" ]; then
  EXE_DIR=/usr/local/bin/taos
fi

if [ "$CLEAR_OPTION" = "clear" ]; then
  echo rm -rf $MGMT_DIR $TSDB_DIR
  rm -rf $TSDB_DIR
  rm -rf $MGMT_DIR
fi

if [ "$EXEC_OPTON" = "start" ]; then
  echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR

  if [ "$SHELL_OPTION" = "true" ]; then
    nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  else
    nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 5 > /dev/null 2>&1 &
  fi

else
  #relative path
  RCFG_DIR=sim/$NODE_NAME/cfg
  PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    if [ "$SIGNAL" = "SIGINT" ]; then
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    else
      echo try to kill by signal SIGKILL
      kill -9 $PID
    fi
    sleep 1
    PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  done
fi
@@ -1,7 +1,120 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3

system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2

system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3

system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4

system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20

system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1

system sh/cfg.sh -n dnode1 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c mDebugFlag -v 143

system sh/cfg.sh -n dnode1 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdbDebugFlag -v 143

system sh/cfg.sh -n dnode1 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdebugFlag -v 143

system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c rpcDebugFlag -v 135

system sh/cfg.sh -n dnode1 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c tsdbDebugFlag -v 131

system sh/cfg.sh -n dnode1 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c mqttDebugFlag -v 131

system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131

system sh/cfg.sh -n dnode1 -c cDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c cDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c cDebugFlag -v 131

system sh/cfg.sh -n dnode1 -c udebugFlag -v 131
system sh/cfg.sh -n dnode2 -c udebugFlag -v 131
system sh/cfg.sh -n dnode3 -c udebugFlag -v 131

system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131

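# deploy: start dnode1, connect, then register dnode2/dnode3 from SQL and start them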
print ============== deploy

system sh/exec.sh -n dnode1 -s start
sleep 2001
sql connect

sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start

print =============== step1
$x = 0
show1:
$x = $x + 1
sleep 2000
if $x == 5 then
return -1
endi
sql show mnodes -x show1
$mnode1Role = $data2_1
print mnode1Role $mnode1Role
$mnode2Role = $data2_2
print mnode2Role $mnode2Role
$mnode3Role = $data2_3
print mnode3Role $mnode3Role

if $mnode1Role != master then
goto show1
endi
if $mnode2Role != slave then
goto show1
endi
if $mnode3Role != slave then
goto show1
endi

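# step2: keep printing the mnode roles every 2 seconds; give up after 1000 rounds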
$x = 1
show2:

print =============== step $x
sql show mnodes
print $data0_1 $data2_1
print $data0_2 $data2_2
print $data0_3 $data2_3

$x = $x + 1
sleep 2000
if $x == 1000 then
return -1
endi

goto show2

@@ -25,6 +25,12 @@ system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 20

system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 20
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 20

system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode4 -c http -v 1

@@ -161,7 +161,7 @@ sleep 10000
$loopCnt = 0
wait_dnode_offline_overtime_dropped:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
if $loopCnt == 20 then
return -1
endi
sql show dnodes

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2

system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
$totalTableNum = 10
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum

system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1

system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator

@@ -56,11 +62,10 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000

$totalTableNum = 10
$sleepTimer = 3000

$db = db
sql create database $db replica 3 maxTables $totalTableNum
sql create database $db replica 3
sql use $db

# create table , insert data

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2

system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
$totalTableNum = 10
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum

system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1

system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator

@@ -54,11 +60,10 @@ sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000

$totalTableNum = 10
$sleepTimer = 3000

$db = db
sql create database $db replica 2 maxTables $totalTableNum
sql create database $db replica 2
sql use $db

# create table , insert data

@@ -137,8 +142,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2

if $dnode3Vtatus != offline then
sleep 2000

@@ -204,8 +209,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2

print dnode2Vtatus: $dnode3Vtatus
print dnode3Vtatus: $dnode3Vtatus

@@ -319,8 +324,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data4_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2

print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2

system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
$totalTableNum = 10
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum

system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1

system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator

@@ -56,11 +62,10 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000

$totalTableNum = 10
$sleepTimer = 3000

$db = db
sql create database $db replica 3 maxTables $totalTableNum
sql create database $db replica 3
sql use $db

# create table , insert data

@@ -139,8 +144,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2

if $dnode4Vtatus != offline then
sleep 2000

@@ -206,8 +211,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2

print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

@@ -325,8 +330,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2

print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

@@ -386,8 +391,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data4_2
$dnode3Vtatus = $data7_2
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2

print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

@@ -23,6 +23,12 @@ system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10

system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 16
system sh/cfg.sh -n dnode5 -c maxVgroupsPerDb -v 16

system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4

@@ -215,6 +221,7 @@ system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 16

system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3

@@ -322,7 +329,7 @@ $totalRows = 0
$tsStart = 1420041600000

$db = db1
sql create database $db replica 2 maxTables 4
sql create database $db replica 2
sql use $db
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)

@@ -29,10 +29,16 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2

system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
$totalTableNum = 10
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v $totalTableNum
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v $totalTableNum

system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode3 -c maxVgroupsPerDb -v 1
system sh/cfg.sh -n dnode4 -c maxVgroupsPerDb -v 1

system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator

@@ -52,11 +58,10 @@ system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 3000

$totalTableNum = 10
$sleepTimer = 10000

$db = db
sql create database $db replica 1 maxTables $totalTableNum
sql create database $db replica 1
sql use $db

# create table , insert data

@@ -352,16 +357,16 @@ if $loopCnt == 10 then
endi

sql show vgroups
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1 $data10_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2 $data10_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 $data10_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4 $data10_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$thirdDnode_2 = $data8_1
$thirdDnode_3 = $data8_2
$thirdDnode_4 = $data8_3
$thirdDnode_5 = $data8_4
$thirdDnode_2 = $data10_1
$thirdDnode_3 = $data10_2
$thirdDnode_4 = $data10_3
$thirdDnode_5 = $data10_4

if $thirdDnode_2 != null then
sleep 2000

@@ -405,10 +410,10 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$sencodDnode_2 = $data5_1
$sencodDnode_3 = $data5_2
$sencodDnode_4 = $data5_3
$sencodDnode_5 = $data5_4
$sencodDnode_2 = $data7_1
$sencodDnode_3 = $data7_2
$sencodDnode_4 = $data7_3
$sencodDnode_5 = $data7_4

if $sencodDnode_2 != null then
sleep 2000

Some files were not shown because too many files have changed in this diff