diff --git a/.travis.yml b/.travis.yml
index 2e268ae04a..4ae38f5032 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -45,11 +45,13 @@ matrix:
cd ${TRAVIS_BUILD_DIR}/debug
make install > /dev/null || travis_terminate $?
+ pip install numpy
pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
+ pip3 install numpy
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests
- ./test-all.sh $TRAVIS_EVENT_TYPE || travis_terminate $?
+ ./test-all.sh smoke || travis_terminate $?
cd ${TRAVIS_BUILD_DIR}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
@@ -164,12 +166,14 @@ matrix:
cd ${TRAVIS_BUILD_DIR}/debug
make install > /dev/null || travis_terminate $?
+ pip install numpy
pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
+ pip3 install numpy
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests
- ./test-all.sh $TRAVIS_EVENT_TYPE COVER
+ ./test-all.sh smoke COVER
TEST_RESULT=$?
@@ -178,7 +182,7 @@ matrix:
cd ${TRAVIS_BUILD_DIR}
lcov -d . --capture --rc lcov_branch_coverage=1 -o coverage.info
- lcov --remove coverage.info '*/tests/*' '*/test/*' '*/deps/*' -o coverage.info
+ lcov --remove coverage.info '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' -o coverage.info
lcov -l --rc lcov_branch_coverage=1 coverage.info || travis_terminate $?
gem install coveralls-lcov
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index c03e3be4ed..ed3b481d0e 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -285,8 +285,8 @@ typedef struct STscObj {
void * pTimer;
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
- char acctId[TSDB_DB_NAME_LEN];
- char db[TSDB_TABLE_ID_LEN];
+ char acctId[TSDB_ACCT_LEN];
+ char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
char sversion[TSDB_VERSION_LEN];
char writeAuth : 1;
char superAuth : 1;
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 602b8cc430..c61402192d 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -449,10 +449,13 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscTrace("%p update table meta in local cache, continue to process sql and send corresponding subquery", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- if (pTableMetaInfo->pTableMeta == NULL){
- code = tscGetTableMeta(pSql, pTableMetaInfo);
+ code = tscGetTableMeta(pSql, pTableMetaInfo);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ } else {
assert(code == TSDB_CODE_SUCCESS);
}
+
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pTableMetaInfo->vgroupIndex >= 0 && pSql->param != NULL);
@@ -473,7 +476,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
- assert(code == TSDB_CODE_SUCCESS);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ } else {
+ assert(code == TSDB_CODE_SUCCESS);
+ }
// if failed to process sql, go to error handler
if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
@@ -483,7 +490,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
// // 1. table uid, 2. ip address
// code = tscSendMsgToServer(pSql);
// if (code == TSDB_CODE_SUCCESS) return;
-// }
} else {
tscTrace("%p continue parse sql after get table meta", pSql);
@@ -491,8 +497,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STMT_INSERT)) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
- assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->pTableMeta != NULL);
-
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ } else {
+ assert(code == TSDB_CODE_SUCCESS);
+ }
(*pSql->fp)(pSql->param, pSql, code);
return;
}
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index b05e82b39a..457e187971 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -151,7 +151,6 @@ typedef struct SRateInfo {
double sum; // for sum/avg
} SRateInfo;
-
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable) {
if (!isValidDataType(dataType, dataBytes)) {
@@ -700,7 +699,7 @@ static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end,
}
static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
- if (pCtx->order == TSDB_ORDER_ASC) {
+ if (pCtx->order != pCtx->param[0].i64Key) {
return BLK_DATA_NO_NEEDED;
}
@@ -728,7 +727,7 @@ static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY
}
static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
- if (pCtx->order == TSDB_ORDER_ASC) {
+ if (pCtx->order != pCtx->param[0].i64Key) {
return BLK_DATA_NO_NEEDED;
}
@@ -1594,7 +1593,7 @@ static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return;
}
-
+
if (pCtx->order == TSDB_ORDER_DESC) {
return;
}
@@ -1653,7 +1652,7 @@ static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) {
* least one data in this block that is not null.(TODO opt for this case)
*/
static void last_function(SQLFunctionCtx *pCtx) {
- if (pCtx->order == TSDB_ORDER_ASC) {
+ if (pCtx->order != pCtx->param[0].i64Key) {
return;
}
@@ -1682,7 +1681,6 @@ static void last_function(SQLFunctionCtx *pCtx) {
}
static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- assert(pCtx->order != TSDB_ORDER_ASC);
void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return;
@@ -1726,7 +1724,7 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
* 1. for scan data in asc order, no need to check data
* 2. for data blocks that are not loaded, no need to check data
*/
- if (pCtx->order == TSDB_ORDER_ASC) {
+ if (pCtx->order != pCtx->param[0].i64Key) {
return;
}
@@ -1764,7 +1762,7 @@ static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
* 1. for scan data in asc order, no need to check data
* 2. for data blocks that are not loaded, no need to check data
*/
- if (pCtx->order == TSDB_ORDER_ASC) {
+ if (pCtx->order != pCtx->param[0].i64Key) {
return;
}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 91e51f16ed..eaf9c21bfb 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -132,7 +132,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
for (int32_t i = 0; i < numOfRows; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i;
- STR_WITH_MAXSIZE_TO_VARSTR(dst, pSchema[i].name, TSDB_COL_NAME_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(dst, pSchema[i].name, TSDB_COL_NAME_LEN - 1);
char *type = tDataTypeDesc[pSchema[i].type].aName;
@@ -171,7 +171,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
// field name
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
char* output = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i;
- STR_WITH_MAXSIZE_TO_VARSTR(output, pSchema[i].name, TSDB_COL_NAME_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(output, pSchema[i].name, TSDB_COL_NAME_LEN - 1);
// type name
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
@@ -211,18 +211,18 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
pQueryInfo->order.order = TSDB_ORDER_ASC;
- TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE};
- strncpy(f.name, "Field", TSDB_COL_NAME_LEN);
+ TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE};
+ tstrncpy(f.name, "Field", sizeof(f.name));
SFieldSupInfo* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
- TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE, TSDB_COL_NAME_LEN, false);
+ (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false);
- rowLen += (TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE);
+ rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
f.bytes = typeColLength;
f.type = TSDB_DATA_TYPE_BINARY;
- strncpy(f.name, "Type", TSDB_COL_NAME_LEN);
+ tstrncpy(f.name, "Type", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, typeColLength,
@@ -232,7 +232,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
f.bytes = sizeof(int32_t);
f.type = TSDB_DATA_TYPE_INT;
- strncpy(f.name, "Length", TSDB_COL_NAME_LEN);
+ tstrncpy(f.name, "Length", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
@@ -242,7 +242,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
f.bytes = noteColLength;
f.type = TSDB_DATA_TYPE_BINARY;
- strncpy(f.name, "Note", TSDB_COL_NAME_LEN);
+ tstrncpy(f.name, "Note", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, noteColLength,
@@ -279,14 +279,15 @@ static void tscProcessCurrentUser(SSqlObj *pSql) {
pExpr->resType = TSDB_DATA_TYPE_BINARY;
char* vx = calloc(1, pExpr->resBytes);
- STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pSql->pTscObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, size);
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
}
static void tscProcessCurrentDB(SSqlObj *pSql) {
- char db[TSDB_DB_NAME_LEN + 1] = {0};
+ char db[TSDB_DB_NAME_LEN] = {0};
extractDBName(pSql->pTscObj->db, db);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index cbc83c6e75..afd1a3e8c8 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -97,7 +97,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
useconds = str2int64(pToken->z);
} else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
- if (taosParseTime(pToken->z, time, pToken->n, timePrec) != TSDB_CODE_SUCCESS) {
+ if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(error, "invalid timestamp format", pToken->z);
}
@@ -795,7 +795,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
STableMetaInfo *pSTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
tscSetTableFullName(pSTableMeterMetaInfo, &sToken, pSql);
- strncpy(pTag->name, pSTableMeterMetaInfo->name, TSDB_TABLE_ID_LEN);
+ tstrncpy(pTag->name, pSTableMeterMetaInfo->name, sizeof(pTag->name));
code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -989,7 +989,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
int validateTableName(char *tblName, int len) {
char buf[TSDB_TABLE_ID_LEN] = {0};
- strncpy(buf, tblName, len);
+ tstrncpy(buf, tblName, sizeof(buf));
SSQLToken token = {.n = len, .type = TK_ID, .z = buf};
tSQLGetToken(buf, &token.type);
@@ -1512,7 +1512,7 @@ void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql) {
}
pCmd->count = 1;
- strncpy(path, pDataBlock->filename, PATH_MAX);
+ tstrncpy(path, pDataBlock->filename, sizeof(path));
FILE *fp = fopen(path, "r");
if (fp == NULL) {
@@ -1520,7 +1520,7 @@ void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql) {
continue;
}
- strncpy(pTableMetaInfo->name, pDataBlock->tableId, TSDB_TABLE_ID_LEN);
+ tstrncpy(pTableMetaInfo->name, pDataBlock->tableId, sizeof(pTableMetaInfo->name));
memset(pDataBlock->pData, 0, pDataBlock->nAllocSize);
int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo);
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 79d00bf5dc..55243c4382 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -239,8 +239,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
continue;
}
- strncpy(pQdesc->sql, pSql->sqlstr, TSDB_SHOW_SQL_LEN - 1);
- pQdesc->sql[TSDB_SHOW_SQL_LEN - 1] = 0;
+ tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql));
pQdesc->stime = htobe64(pSql->stime);
pQdesc->queryId = htonl(pSql->queryId);
pQdesc->useconds = htobe64(pSql->res.useconds);
@@ -256,8 +255,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SSqlStream *pStream = pObj->streamList;
while (pStream) {
- strncpy(pSdesc->sql, pStream->pSql->sqlstr, TSDB_SHOW_SQL_LEN - 1);
- pSdesc->sql[TSDB_SHOW_SQL_LEN - 1] = 0;
+ tstrncpy(pSdesc->sql, pStream->pSql->sqlstr, sizeof(pSdesc->sql));
pSdesc->streamId = htonl(pStream->streamId);
pSdesc->num = htobe64(pStream->num);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index f29c886cba..144baa6026 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -138,7 +138,7 @@ static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVa
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
if (seg != NULL) {
- if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision) != TSDB_CODE_SUCCESS) {
+ if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg);
}
} else {
@@ -169,7 +169,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pPwd->n > TSDB_PASSWORD_LEN) {
+ if (pPwd->n >= TSDB_PASSWORD_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -232,7 +232,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pzName->n = strdequote(pzName->z);
strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
} else { // drop user
- if (pzName->n > TSDB_USER_LEN) {
+ if (pzName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -317,7 +317,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- if (pName->n > TSDB_USER_LEN) {
+ if (pName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -348,7 +348,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pToken->n > TSDB_TABLE_NAME_LEN) {
+ if (!tscValidateTableNameLength(pToken->n)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -401,7 +401,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSQLToken* pName = &pUser->user;
SSQLToken* pPwd = &pUser->passwd;
- if (pName->n > TSDB_USER_LEN) {
+ if (pName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -543,7 +543,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
}
- pSql->cmd.parseFinished = true;
+ pSql->cmd.parseFinished = 1;
return tscBuildMsg[pCmd->command](pSql, pInfo);
}
@@ -933,7 +933,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
SSchema* pSchema = tscGetTableSchema(pTableMeta);
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
- if (strncasecmp(pTagField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) {
+ if (strncasecmp(pTagField->name, pSchema[i].name, sizeof(pTagField->name) - 1) == 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
@@ -993,7 +993,7 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
// field name must be unique
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
- if (strncasecmp(pColField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) {
+ if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
@@ -1005,7 +1005,8 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
/* is contained in pFieldList or not */
static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name) {
for (int32_t j = startIdx; j < pFieldList->nField; ++j) {
- if (strncasecmp(name, pFieldList->p[j].name, TSDB_COL_NAME_LEN) == 0) return true;
+ TAOS_FIELD* field = pFieldList->p + j;
+ if (strncasecmp(name, field->name, sizeof(field->name) - 1) == 0) return true;
}
return false;
@@ -1041,7 +1042,7 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL
/* db name is not specified, the tableName dose not include db name */
if (pDB != NULL) {
- if (pDB->n > TSDB_DB_NAME_LEN) {
+ if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1055,12 +1056,12 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL
totalLen += 1;
/* here we only check the table name length limitation */
- if (tableName->n > TSDB_TABLE_NAME_LEN) {
+ if (!tscValidateTableNameLength(tableName->n)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
} else { // pDB == NULL, the db prefix name is specified in tableName
/* the length limitation includes tablename + dbname + sep */
- if (tableName->n > TSDB_TABLE_NAME_LEN + TSDB_DB_NAME_LEN + tListLen(TS_PATH_DELIMITER)) {
+ if (tableName->n >= TSDB_TABLE_NAME_LEN + TSDB_DB_NAME_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -1077,7 +1078,7 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL
fullName[totalLen] = 0;
}
- return (totalLen <= TSDB_TABLE_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
+ return (totalLen < TSDB_TABLE_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
}
static void extractColumnNameFromString(tSQLExprItem* pItem) {
@@ -1166,7 +1167,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
/* todo alias name should use the original sql string */
char* name = (pItem->aliasName != NULL)? pItem->aliasName:arithmeticExprStr;
- strncpy(pExpr->aliasName, name, TSDB_COL_NAME_LEN);
+ tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
tExprNode* pNode = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
@@ -1308,7 +1309,7 @@ static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumn
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);
char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName;
- strncpy(pExpr->aliasName, colName, tListLen(pExpr->aliasName));
+ tstrncpy(pExpr->aliasName, colName, sizeof(pExpr->aliasName));
SColumnList ids = {0};
ids.num = 1;
@@ -1358,7 +1359,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
for (int32_t j = 0; j < numOfTotalColumns; ++j) {
SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos + j, j, pIndex->tableIndex);
- strncpy(pExpr->aliasName, pSchema[j].name, tListLen(pExpr->aliasName));
+ tstrncpy(pExpr->aliasName, pSchema[j].name, sizeof(pExpr->aliasName));
pIndex->columnIndex = j;
SColumnList ids = {0};
@@ -1401,9 +1402,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE};
- strcpy(colSchema.name, TSQL_TBNAME_L);
-
+ SSchema colSchema = tGetTableNameColumnSchema();
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true);
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1448,11 +1447,18 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
if (aliasName != NULL) {
strcpy(columnName, aliasName);
} else {
- getRevisedName(columnName, functionID, TSDB_COL_NAME_LEN, pSchema[pColIndex->columnIndex].name);
+ getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
}
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
- strncpy(pExpr->aliasName, columnName, tListLen(pExpr->aliasName));
+ tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));
+
+ // set reverse-order scan of data blocks for the last query
+ if (functionID == TSDB_FUNC_LAST) {
+ pExpr->numOfParams = 1;
+ pExpr->param[0].i64Key = TSDB_ORDER_DESC;
+ pExpr->param[0].nType = TSDB_DATA_TYPE_INT;
+ }
// for all queries, the timestamp column needs to be loaded
SColumnIndex index = {.tableIndex = pColIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
@@ -1536,7 +1542,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
}
memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- getColumnName(pItem, pExpr->aliasName, TSDB_COL_NAME_LEN);
+ getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);
SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex);
if (finalResult) {
@@ -1651,7 +1657,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
ids.ids[0] = index;
memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- getColumnName(pItem, pExpr->aliasName, TSDB_COL_NAME_LEN);
+ getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);
if (finalResult) {
int32_t numOfOutput = tscNumOfFields(pQueryInfo);
@@ -1725,6 +1731,22 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
+
+ if (optr == TK_LAST) { // todo refactor
+ SSqlGroupbyExpr* pGroupBy = &pQueryInfo->groupbyExpr;
+ if (pGroupBy->numOfGroupCols > 0) {
+ for(int32_t k = 0; k < pGroupBy->numOfGroupCols; ++k) {
+ SColIndex* pIndex = taosArrayGet(pGroupBy->columnInfo, k);
+ if (!TSDB_COL_IS_TAG(pIndex->flag) && pIndex->colIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // group by normal columns
+ SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, colIndex + i);
+ pExpr->numOfParams = 1;
+ pExpr->param->i64Key = TSDB_ORDER_ASC;
+
+ break;
+ }
+ }
+ }
+ }
}
}
@@ -1851,7 +1873,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
}
memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- getColumnName(pItem, pExpr->aliasName, TSDB_COL_NAME_LEN);
+ getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);
SColumnList ids = getColumnList(1, 0, index.columnIndex);
if (finalResult) {
@@ -1913,9 +1935,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSchema s = {0};
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- s.bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- s.type = TSDB_DATA_TYPE_BINARY;
- s.colId = TSDB_TBNAME_COLUMN_INDEX;
+ s = tGetTableNameColumnSchema();
} else {
s = pTagSchema[index.columnIndex];
}
@@ -1993,6 +2013,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken
if (strncasecmp(pSchema[i].name, pToken->z, pToken->n) == 0) {
columnIndex = i;
+ break;
}
}
@@ -2051,7 +2072,7 @@ int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColum
}
pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL;
- char tableName[TSDB_TABLE_ID_LEN + 1] = {0};
+ char tableName[TSDB_TABLE_ID_LEN] = {0};
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
@@ -2202,7 +2223,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pDbPrefixToken->type != 0) {
assert(pDbPrefixToken->n >= 0);
- if (pDbPrefixToken->n > TSDB_DB_NAME_LEN) { // db name is too long
+ if (pDbPrefixToken->n >= TSDB_DB_NAME_LEN) { // db name is too long
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2229,7 +2250,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- if (pCmd->payloadLen > TSDB_TABLE_NAME_LEN) {
+ if (!tscValidateTableNameLength(pCmd->payloadLen)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -2240,7 +2261,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// show vnodes may be ip addr of dnode in payload
SSQLToken* pDnodeIp = &pShowInfo->prefix;
- if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) { // ip addr is too long
+ if (pDnodeIp->n >= TSDB_IPv4ADDR_LEN) { // ip addr is too long
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2589,9 +2610,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
tscColumnListInsert(pQueryInfo->colList, &index);
- SColIndex colIndex = {
- .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId,
- };
+ SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
@@ -2860,7 +2879,7 @@ static int32_t tablenameListToString(tSQLExpr* pExpr, SStringBuilder* sb) {
taosStringBuilderAppendString(sb, TBNAME_LIST_SEP);
}
- if (pSub->val.nLen <= 0 || pSub->val.nLen > TSDB_TABLE_NAME_LEN) {
+ if (pSub->val.nLen <= 0 || !tscValidateTableNameLength(pSub->val.nLen)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -2889,7 +2908,8 @@ static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIn
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);
const char* msg1 = "non binary column not support like operator";
- const char* msg2 = "binary column not support this operator";
+ const char* msg2 = "binary column not support this operator";
+ const char* msg3 = "bool column not support this operator";
SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex);
SColumnFilterInfo* pColFilter = NULL;
@@ -2923,6 +2943,12 @@ static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIn
if (pExpr->nSQLOptr == TK_LIKE) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
+
+ if (pSchema->type == TSDB_DATA_TYPE_BOOL) {
+ if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) {
+ return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ }
+ }
}
pColumn->colIndex = *pIndex;
@@ -3615,7 +3641,7 @@ static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* ac
taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1);
}
- char idBuf[TSDB_TABLE_ID_LEN + 1] = {0};
+ char idBuf[TSDB_TABLE_ID_LEN] = {0};
int32_t xlen = strlen(segments[i]);
SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING};
@@ -3924,7 +3950,7 @@ int32_t getTimeRange(STimeWindow* win, tSQLExpr* pRight, int32_t optr, int16_t t
char* seg = strnchr(pRight->val.pz, '-', pRight->val.nLen, false);
if (seg != NULL) {
- if (taosParseTime(pRight->val.pz, &val, pRight->val.nLen, TSDB_TIME_PRECISION_MICRO) == TSDB_CODE_SUCCESS) {
+ if (taosParseTime(pRight->val.pz, &val, pRight->val.nLen, TSDB_TIME_PRECISION_MICRO, tsDaylight) == TSDB_CODE_SUCCESS) {
parsed = true;
} else {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -3997,7 +4023,7 @@ int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
char* fieldName = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i)->name;
- for (int32_t j = 0; j < TSDB_COL_NAME_LEN && fieldName[j] != 0; ++j) {
+ for (int32_t j = 0; j < (TSDB_COL_NAME_LEN - 1) && fieldName[j] != 0; ++j) {
for (int32_t k = 0; k < tListLen(rep); ++k) {
if (fieldName[j] == rep[k]) {
fieldName[j] = '_';
@@ -4013,7 +4039,7 @@ int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
char* fieldName = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i)->name;
for (int32_t j = i + 1; j < pQueryInfo->fieldsInfo.numOfOutput; ++j) {
- if (strncasecmp(fieldName, tscFieldInfoGetField(&pQueryInfo->fieldsInfo, j)->name, TSDB_COL_NAME_LEN) == 0) {
+ if (strncasecmp(fieldName, tscFieldInfoGetField(&pQueryInfo->fieldsInfo, j)->name, (TSDB_COL_NAME_LEN - 1)) == 0) {
const char* msg = "duplicated column name in new table";
return invalidSqlErrMsg(pQueryInfo->msg, msg);
}
@@ -4368,7 +4394,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
tVariantListItem* pItem = &pAlterSQL->varList->a[0];
- if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) {
+ if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
return invalidSqlErrMsg(pQueryInfo->msg, msg9);
}
@@ -4379,9 +4405,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- if (index.columnIndex < tscGetNumOfColumns(pTableMeta)) {
+ int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
+ if (index.columnIndex < numOfCols) {
return invalidSqlErrMsg(pQueryInfo->msg, msg10);
- } else if (index.columnIndex == 0) {
+ } else if (index.columnIndex == numOfCols) {
return invalidSqlErrMsg(pQueryInfo->msg, msg11);
}
@@ -4420,7 +4447,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- char name[TSDB_COL_NAME_LEN + 1] = {0};
+ char name[TSDB_COL_NAME_LEN] = {0};
strncpy(name, pVarList->a[0].pVar.pz, pVarList->a[0].pVar.nLen);
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
@@ -4522,8 +4549,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(pQueryInfo->msg, msg18);
}
- char name1[TSDB_COL_NAME_LEN + 1] = {0};
- strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen);
+ char name1[TSDB_COL_NAME_LEN] = {0};
+ tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
}
@@ -5231,9 +5258,10 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
int16_t colIndex = pColIndex->colIndex;
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- type = TSDB_DATA_TYPE_BINARY;
- bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE; // todo extract method
- name = TSQL_TBNAME_L;
+ SSchema s = tGetTableNameColumnSchema();
+ type = s.type;
+ bytes = s.bytes;
+ name = s.name;
} else {
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
SSchema* tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
@@ -5254,8 +5282,8 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true);
- memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- strncpy(pExpr->aliasName, name, TSDB_COL_NAME_LEN);
+ memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName));
+ tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
pExpr->colInfo.flag = TSDB_COL_TAG;
@@ -5419,7 +5447,7 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false);
const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
- strncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
+ tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
return TSDB_CODE_SUCCESS;
}
@@ -5601,7 +5629,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// get meter meta from mnode
- strncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, TSDB_TABLE_ID_LEN);
+ tstrncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, sizeof(pCreateTable->usingInfo.tagdata.name));
tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals;
int32_t code = tscGetTableMeta(pSql, pStableMeterMetaInfo);
@@ -6019,7 +6047,7 @@ int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray*
if (pCols != NULL) { // record the involved columns
SColIndex colIndex = {0};
- strncpy(colIndex.name, pSchema->name, TSDB_COL_NAME_LEN);
+ tstrncpy(colIndex.name, pSchema->name, sizeof(colIndex.name));
colIndex.colId = pSchema->colId;
colIndex.colIndex = index.columnIndex;
@@ -6053,6 +6081,16 @@ int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray*
}
}
}
+
+ if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) {
+ if (pRight->nodeType == TSQL_NODE_VALUE) {
+ if ( pRight->pVal->nType == TSDB_DATA_TYPE_BOOL
+ || pRight->pVal->nType == TSDB_DATA_TYPE_BINARY
+ || pRight->pVal->nType == TSDB_DATA_TYPE_NCHAR) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
+ }
+ }
}
return TSDB_CODE_SUCCESS;
diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c
index 59415f842e..439aa7c1de 100644
--- a/src/client/src/tscSchemaUtil.c
+++ b/src/client/src/tscSchemaUtil.c
@@ -50,14 +50,6 @@ int32_t tscGetNumOfColumns(const STableMeta* pTableMeta) {
SSchema *tscGetTableSchema(const STableMeta *pTableMeta) {
assert(pTableMeta != NULL);
-
-// if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
-// STableMeta* pSTableMeta = pTableMeta->pSTable;
-// assert (pSTableMeta != NULL);
-//
-// return pSTableMeta->schema;
-// }
-
return (SSchema*) pTableMeta->schema;
}
@@ -115,7 +107,7 @@ bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols) {
// 3. valid column names
for (int32_t j = i + 1; j < numOfCols; ++j) {
- if (strncasecmp(pSchema[i].name, pSchema[j].name, TSDB_COL_NAME_LEN) == 0) {
+ if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
return false;
}
}
diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c
index 95d559b4fa..52a06277e3 100644
--- a/src/client/src/tscSecondaryMerge.c
+++ b/src/client/src/tscSecondaryMerge.c
@@ -217,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->numOfBuffer = numOfFlush;
pReducer->numOfVnode = numOfBuffer;
-
+
pReducer->pDesc = pDesc;
tscTrace("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);
@@ -324,7 +324,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
tfree(pReducer->discardData);
tfree(pReducer->pResultBuf);
tfree(pReducer->pFinalRes);
-// tfree(pReducer->pBufForInterpo);
tfree(pReducer->prevRowOfInput);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -363,7 +362,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
- 4096, numOfCols, pQueryInfo->slidingTime, pQueryInfo->fillType, pFillCol);
+ 4096, numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
+ tinfo.precision, pQueryInfo->fillType, pFillCol);
}
int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
@@ -494,7 +494,7 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tscTrace("%p waiting for delete procedure, status: %d", pSql, status);
}
- taosDestoryFillInfo(pLocalReducer->pFillInfo);
+ pLocalReducer->pFillInfo = taosDestoryFillInfo(pLocalReducer->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
@@ -604,7 +604,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
tOrderDescriptor *pOrderDesc = pReducer->pDesc;
SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo;
-
+
// no group by columns, all data belongs to one group
int32_t numOfCols = orderInfo->numOfCols;
if (numOfCols <= 0) {
@@ -627,7 +627,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
// only one row exists
int32_t index = orderInfo->pData[0];
int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset;
-
+
int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset);
return ret == 0;
}
@@ -980,8 +980,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
/* all output for current group are completed */
- int32_t totalRemainRows =
- taosGetNumOfResultWithFill(pFillInfo, rpoints, pFillInfo->slidingTime, actualETime);
+ int32_t totalRemainRows = getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@@ -1041,7 +1040,7 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
+
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
@@ -1183,7 +1182,7 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
-
+
int16_t functionId = pLocalReducer->pCtx[0].functionId;
// todo opt performance
@@ -1267,13 +1266,7 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
if (pFillInfo != NULL) {
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
- TSKEY ekey = taosGetRevisedEndKey(pQueryInfo->window.ekey, pFillInfo->order, pFillInfo->slidingTime,
- pQueryInfo->slidingTimeUnit, tinfo.precision);
-
- taosFillSetStartInfo(pFillInfo, pResBuf->num, ekey);
+ taosFillSetStartInfo(pFillInfo, pResBuf->num, pQueryInfo->window.ekey);
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
@@ -1327,23 +1320,15 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
- int8_t p = tinfo.precision;
-
if (pFillInfo != NULL && taosNumOfRemainRows(pFillInfo) > 0) {
assert(pQueryInfo->fillType != TSDB_FILL_NONE);
tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
- int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
+ int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
- int32_t remain = taosNumOfRemainRows(pFillInfo);
- TSKEY ekey = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, p);
-
// the first column must be the timestamp column
- int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, ekey, pLocalReducer->resColModel->capacity);
- if (rows > 0) { // do interpo
+ int32_t rows = getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
+ if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false);
}
@@ -1362,10 +1347,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL ||
prevGroupCompleted) {
@@ -1373,9 +1355,8 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.ekey : pQueryInfo->window.skey;
- etime = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->intervalTime,
- pQueryInfo->slidingTimeUnit, tinfo.precision);
- int32_t rows = taosGetNumOfResultWithFill(pFillInfo, 0, etime, pLocalReducer->resColModel->capacity);
+ assert(pFillInfo->numOfRows == 0);
+ int32_t rows = getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do interpo
doFillResult(pSql, pLocalReducer, true);
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index f96b979105..09a3f6767b 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -209,6 +209,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
tscError("%p sql is already released", pSql->signature);
return;
}
+
if (pSql->signature != pSql) {
tscError("%p sql is already released, signature:%p", pSql, pSql->signature);
return;
@@ -217,10 +218,9 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
STscObj *pObj = pSql->pTscObj;
- // tscTrace("%p msg:%s is received from server", pSql, taosMsg[rpcMsg->msgType]);
- if (pObj->signature != pObj) {
- tscTrace("%p sql is already released or DB connection is closed, freed:%d pObj:%p signature:%p", pSql, pSql->freed,
+ if (pObj->signature != pObj || pSql->freed == 1) {
+ tscTrace("%p sqlObj needs to be released or DB connection is closed, freed:%d pObj:%p signature:%p", pSql, pSql->freed,
pObj, pObj->signature);
tscFreeSqlObj(pSql);
rpcFreeCont(rpcMsg->pCont);
@@ -375,7 +375,7 @@ int tscProcessSql(SSqlObj *pSql) {
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = NULL;
- uint16_t type = 0;
+ uint32_t type = 0;
if (pQueryInfo != NULL) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -424,13 +424,13 @@ void tscKillSTableQuery(SSqlObj *pSql) {
* sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
*/
pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
- //taosStopRpcConn(pSql->pSubs[i]->thandle);
+// taosStopRpcConn(pSql->pSubs[i]->);
}
/*
* 1. if the subqueries are not launched or partially launched, we need to waiting the launched
* query return to successfully free allocated resources.
- * 2. if no any subqueries are launched yet, which means the metric query only in parse sql stage,
+ * 2. if no subqueries have been launched yet, which means the super table query is only in the parse sql stage,
* set the res.code, and return.
*/
const int64_t MAX_WAITING_TIME = 10000; // 10 Sec.
@@ -866,7 +866,7 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pCmd->numOfClause == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- strncpy(pCreateDbMsg->db, pTableMetaInfo->name, tListLen(pCreateDbMsg->db));
+ tstrncpy(pCreateDbMsg->db, pTableMetaInfo->name, sizeof(pCreateDbMsg->db));
return TSDB_CODE_SUCCESS;
}
@@ -1018,7 +1018,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropDnodeMsg *pDrop = (SCMDropDnodeMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- strcpy(pDrop->ep, pTableMetaInfo->name);
+ tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE;
return TSDB_CODE_SUCCESS;
@@ -1036,7 +1036,7 @@ int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropUserMsg *pDropMsg = (SCMDropUserMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- strcpy(pDropMsg->user, pTableMetaInfo->name);
+ tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
return TSDB_CODE_SUCCESS;
}
@@ -1091,9 +1091,9 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
size_t nameLen = strlen(pTableMetaInfo->name);
if (nameLen > 0) {
- strcpy(pShowMsg->db, pTableMetaInfo->name); // prefix is set here
+ tstrncpy(pShowMsg->db, pTableMetaInfo->name, sizeof(pShowMsg->db)); // prefix is set here
} else {
- strcpy(pShowMsg->db, pObj->db);
+ tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
}
SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt;
@@ -1289,7 +1289,10 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) (pCmd->payload + tsRpcHeadSize);
pCmd->payloadLen = htonl(pUpdateMsg->head.contLen);
-
+ SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ tscSetDnodeIpList(pSql, &pTableMetaInfo->pTableMeta->vgroupInfo);
+
return TSDB_CODE_SUCCESS;
}
@@ -1300,7 +1303,7 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMAlterDbMsg *pAlterDbMsg = (SCMAlterDbMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- strcpy(pAlterDbMsg->db, pTableMetaInfo->name);
+ tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db));
return TSDB_CODE_SUCCESS;
}
@@ -1430,9 +1433,9 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
char *db; // ugly code to move the space
db = strstr(pObj->db, TS_PATH_DELIMITER);
db = (db == NULL) ? pObj->db : db + 1;
- strcpy(pConnect->db, db);
- strcpy(pConnect->clientVersion, version);
- strcpy(pConnect->msgVersion, "");
+ tstrncpy(pConnect->db, db, sizeof(pConnect->db));
+ tstrncpy(pConnect->clientVersion, version, sizeof(pConnect->clientVersion));
+ tstrncpy(pConnect->msgVersion, "", sizeof(pConnect->msgVersion));
return TSDB_CODE_SUCCESS;
}
@@ -1705,8 +1708,9 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
- strncpy(pMsg, pTableMetaInfo->name, TSDB_TABLE_ID_LEN);
- pMsg += TSDB_TABLE_ID_LEN;
+ size_t size = sizeof(pTableMetaInfo->name);
+ tstrncpy(pMsg, pTableMetaInfo->name, size);
+ pMsg += size;
}
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP;
@@ -2150,11 +2154,11 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SCMConnectRsp *pConnect = (SCMConnectRsp *)pRes->pRsp;
- strcpy(pObj->acctId, pConnect->acctId); // copy acctId from response
+ tstrncpy(pObj->acctId, pConnect->acctId, sizeof(pObj->acctId)); // copy acctId from response
int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);
- assert(len <= tListLen(pObj->db));
- strncpy(pObj->db, temp, tListLen(pObj->db));
+ assert(len <= sizeof(pObj->db));
+ tstrncpy(pObj->db, temp, sizeof(pObj->db));
if (pConnect->ipList.numOfIps > 0)
tscSetMgmtIpList(&pConnect->ipList);
@@ -2172,7 +2176,7 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
STscObj * pObj = pSql->pTscObj;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
- strcpy(pObj->db, pTableMetaInfo->name);
+ tstrncpy(pObj->db, pTableMetaInfo->name, sizeof(pObj->db));
return 0;
}
@@ -2197,7 +2201,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
* The cached information is expired, however, we may have lost the ref of original meter. So, clear whole cache
* instead.
*/
- tscTrace("%p force release metermeta after drop table:%s", pSql, pTableMetaInfo->name);
+ tscTrace("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
if (pTableMetaInfo->pTableMeta) {
@@ -2220,9 +2224,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
if (pTableMetaInfo->pTableMeta) {
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
-
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
-// taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pMetricMeta), true);
if (isSuperTable) { // if it is a super table, reset whole query cache
tscTrace("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
@@ -2338,7 +2340,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
STableMetaInfo *pNewMeterMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
- strncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, tListLen(pNewMeterMetaInfo->name));
+ tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name));
memcpy(pNew->cmd.payload, pSql->cmd.payload, pSql->cmd.payloadLen); // tag information if table does not exists.
pNew->cmd.payloadLen = pSql->cmd.payloadLen;
tscTrace("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated);
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index aab931cf93..6f043f186a 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -45,11 +45,11 @@ static bool validImpl(const char* str, size_t maxsize) {
}
static bool validUserName(const char* user) {
- return validImpl(user, TSDB_USER_LEN);
+ return validImpl(user, TSDB_USER_LEN - 1);
}
static bool validPassword(const char* passwd) {
- return validImpl(passwd, TSDB_PASSWORD_LEN);
+ return validImpl(passwd, TSDB_PASSWORD_LEN - 1);
}
SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
@@ -86,21 +86,21 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
pObj->signature = pObj;
- strncpy(pObj->user, user, TSDB_USER_LEN);
+ tstrncpy(pObj->user, user, sizeof(pObj->user));
taosEncryptPass((uint8_t *)pass, strlen(pass), pObj->pass);
if (db) {
int32_t len = strlen(db);
/* db name is too long */
- if (len > TSDB_DB_NAME_LEN) {
+ if (len >= TSDB_DB_NAME_LEN) {
terrno = TSDB_CODE_TSC_INVALID_DB_LENGTH;
rpcClose(pDnodeConn);
free(pObj);
return NULL;
}
- char tmp[TSDB_DB_NAME_LEN + 1] = {0};
- strcpy(tmp, db);
+ char tmp[TSDB_DB_NAME_LEN] = {0};
+ tstrncpy(tmp, db, sizeof(tmp));
strdequote(tmp);
strtolower(pObj->db, tmp);
@@ -219,6 +219,11 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
sem_post(&pSql->rspSem);
}
+static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
+ SSqlObj* pSql = (SSqlObj*) tres;
+ sem_post(&pSql->rspSem);
+}
+
TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
@@ -369,11 +374,6 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
return (pQueryInfo->order.order == TSDB_ORDER_DESC) ? pRes->numOfRows : -pRes->numOfRows;
}
-static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
- SSqlObj* pSql = (SSqlObj*) tres;
- sem_post(&pSql->rspSem);
-}
-
TAOS_ROW taos_fetch_row(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) {
@@ -475,6 +475,42 @@ int taos_select_db(TAOS *taos, const char *db) {
return code;
}
+// send a message to the vnode to free the qhandle and corresponding resources in the vnode
+static bool tscFreeQhandleInVnode(SSqlObj* pSql) {
+ SSqlCmd* pCmd = &pSql->cmd;
+ SSqlRes* pRes = &pSql->res;
+
+ SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+
+ if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && !tscIsTwoStageSTableQuery(pQueryInfo, 0) &&
+ (pCmd->command == TSDB_SQL_SELECT ||
+ pCmd->command == TSDB_SQL_SHOW ||
+ pCmd->command == TSDB_SQL_RETRIEVE ||
+ pCmd->command == TSDB_SQL_FETCH) &&
+ (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
+
+ pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
+ tscTrace("%p start to send msg to free qhandle in dnode, command:%s", pSql, sqlCmd[pCmd->command]);
+ pSql->freed = 1;
+ tscProcessSql(pSql);
+
+ // in the case of a synchronous query, wait for the response and then continue
+// if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) {
+// sem_wait(&pSql->rspSem);
+
+// tscFreeSqlObj(pSql);
+// tscTrace("%p sqlObj is freed by app", pSql);
+// } else {
+ tscTrace("%p sqlObj will be freed while rsp received", pSql);
+// }
+
+ return true;
+ }
+
+ return false;
+}
+
void taos_free_result(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
tscTrace("%p start to free result", res);
@@ -484,10 +520,8 @@ void taos_free_result(TAOS_RES *res) {
return;
}
- SSqlRes *pRes = &pSql->res;
- SSqlCmd *pCmd = &pSql->cmd;
-
// The semaphore can not be changed while freeing async sub query objects.
+ SSqlRes *pRes = &pSql->res;
if (pRes == NULL || pRes->qhandle == 0) {
tscTrace("%p SqlObj is freed by app, qhandle is null", pSql);
tscFreeSqlObj(pSql);
@@ -502,31 +536,10 @@ void taos_free_result(TAOS_RES *res) {
}
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- /*
- * If the query process is cancelled by user in stable query, tscProcessSql should not be called
- * for each subquery. Because the failure of execution tsProcessSql may trigger the callback function
- * be executed, and the retry efforts may result in double free the resources, e.g.,SRetrieveSupport
- */
- if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false &&
- (pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_SHOW ||
- pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_FETCH) &&
- (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
- pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
-
- tscTrace("%p start to send msg to free qhandle in dnode, command:%s", pSql, sqlCmd[pCmd->command]);
- pSql->freed = 1;
- tscProcessSql(pSql);
-
- // in case of sync model query, waits for response and then goes on
- if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) {
- sem_wait(&pSql->rspSem);
- }
+ if (!tscFreeQhandleInVnode(pSql)) {
+ tscFreeSqlObj(pSql);
+ tscTrace("%p sqlObj is freed by app", pSql);
}
-
- tscFreeSqlObj(pSql);
- tscTrace("%p sql result is freed by app", pSql);
}
// todo should not be used in async query
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 84daf63b42..3bc931a855 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -1662,12 +1662,13 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
*/
if (code != TSDB_CODE_SUCCESS) {
if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) {
- tscTrace("%p sub:%p reach the max retry times, set global code:%d", pParentSql, pSql, code);
+ tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
atomic_val_compare_exchange_32(&pState->code, 0, code);
} else { // does not reach the maximum retry time, go on
tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
SSqlObj *pNew = tscCreateSqlObjForSubquery(pParentSql, trsupport, pSql);
+
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vgId:%d, orderOfSub:%d",
trsupport->pParentSqlObj, pSql, pVgroup->vgId, trsupport->subqueryIndex);
@@ -1677,7 +1678,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
} else {
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
assert(pNewQueryInfo->pTableMetaInfo[0]->pTableMeta != NULL);
-
+
+ taos_free_result(pSql);
tscProcessSql(pNew);
return;
}
@@ -1921,7 +1923,7 @@ static char *getArithemicInputSrc(void *param, const char *name, int32_t colId)
for (int32_t i = 0; i < pSupport->numOfCols; ++i) {
pExpr = taosArrayGetP(pSupport->exprList, i);
- if (strncmp(name, pExpr->aliasName, TSDB_COL_NAME_LEN) == 0) {
+ if (strncmp(name, pExpr->aliasName, sizeof(pExpr->aliasName) - 1) == 0) {
index = i;
break;
}
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index fd7d31aa9f..bcd01a322e 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -144,11 +144,11 @@ void taos_init_imp() {
}
int64_t refreshTime = tsTableMetaKeepTimer;
- refreshTime = refreshTime > 2 ? 2 : refreshTime;
- refreshTime = refreshTime < 1 ? 1 : refreshTime;
+ refreshTime = refreshTime > 10 ? 10 : refreshTime;
+ refreshTime = refreshTime < 10 ? 10 : refreshTime;
if (tscCacheHandle == NULL) {
- tscCacheHandle = taosCacheInit(tscTmr, refreshTime);
+ tscCacheHandle = taosCacheInit(refreshTime);
}
tscTrace("client is initialized successfully");
@@ -180,7 +180,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_OPTION) {
- strncpy(configDir, pStr, TSDB_FILENAME_LEN);
+ tstrncpy(configDir, pStr, TSDB_FILENAME_LEN);
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
tscPrint("set config file directory:%s", pStr);
} else {
@@ -234,7 +234,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
tscPrint("failed to set locale:%s, current locale:%s", pStr, tsLocale);
}
- strncpy(tsLocale, locale, tListLen(tsLocale));
+ tstrncpy(tsLocale, locale, sizeof(tsLocale));
char *charset = strrchr(tsLocale, sep);
if (charset != NULL) {
@@ -249,7 +249,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
tscPrint("charset changed from %s to %s", tsCharset, charset);
}
- strncpy(tsCharset, charset, tListLen(tsCharset));
+ tstrncpy(tsCharset, charset, sizeof(tsCharset));
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
} else {
@@ -286,7 +286,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
tscPrint("charset changed from %s to %s", tsCharset, pStr);
}
- strncpy(tsCharset, pStr, tListLen(tsCharset));
+ tstrncpy(tsCharset, pStr, sizeof(tsCharset));
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
} else {
tscPrint("charset:%s not valid", pStr);
@@ -324,7 +324,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
// return -1;
// }
- strncpy(tsSocketType, pStr, tListLen(tsSocketType));
+ tstrncpy(tsSocketType, pStr, sizeof(tsSocketType));
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
tscPrint("socket type is set:%s", tsSocketType);
}
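A note on the strncpy to tstrncpy conversions that recur throughout this commit (tscSystem.c above, tscUtil.c below, cqMain.c, dnodeShell.c, mnodeDb.c and others): the point is that the destination is always terminated and the bound comes from sizeof on the destination buffer. The helper itself is not part of this diff (it presumably sits behind the tutil.h include added in dnodeShell.c), so the following is only a minimal sketch of the contract the call sites rely on:

    /* Sketch only -- not TDengine's implementation. Copies at most
     * size - 1 bytes and always NUL-terminates, unlike plain strncpy,
     * which may leave the destination unterminated. */
    #include <string.h>

    static void tstrncpy_sketch(char *dst, const char *src, size_t size) {
      if (size == 0) return;
      strncpy(dst, src, size - 1);
      dst[size - 1] = '\0';
    }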
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index caf424080d..57634e73fd 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -134,24 +134,6 @@ void tscGetDBInfoFromMeterId(char* tableId, char* db) {
db[0] = 0;
}
-//STableIdInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx) {
-// if (pSidList == NULL) {
-// tscError("illegal sidlist");
-// return 0;
-// }
-//
-// if (idx < 0 || idx >= pSidList->numOfSids) {
-// int32_t sidRange = (pSidList->numOfSids > 0) ? (pSidList->numOfSids - 1) : 0;
-//
-// tscError("illegal sidIdx:%d, reset to 0, sidIdx range:%d-%d", idx, 0, sidRange);
-// idx = 0;
-// }
-//
-// assert(pSidList->pSidExtInfoList[idx] >= 0);
-//
-// return (STableIdInfo*)(pSidList->pSidExtInfoList[idx] + (char*)pSidList);
-//}
-
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
@@ -176,8 +158,7 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
return false;
}
- if (((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) &&
- pQueryInfo->command == TSDB_SQL_SELECT) {
+ if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->command == TSDB_SQL_SELECT) {
return UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
}
@@ -593,7 +574,7 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
- strncpy(dataBuf->tableId, name, TSDB_TABLE_ID_LEN);
+ tstrncpy(dataBuf->tableId, name, sizeof(dataBuf->tableId));
/*
* The table meta may be released since the table meta cache are completed clean by other thread
@@ -801,7 +782,7 @@ int tscAllocPayload(SSqlCmd* pCmd, int size) {
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) {
TAOS_FIELD f = { .type = type, .bytes = bytes, };
- strncpy(f.name, name, TSDB_COL_NAME_LEN);
+ tstrncpy(f.name, name, sizeof(f.name));
return f;
}
@@ -966,12 +947,12 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
if (isTagCol) {
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
pExpr->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
- strncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, TSDB_COL_NAME_LEN);
+ tstrncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(pExpr->colInfo.name));
} else if (pTableMetaInfo->pTableMeta != NULL) {
// in handling select database/version/server_status(), the pTableMeta is NULL
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->columnIndex);
pExpr->colInfo.colId = pSchema->colId;
- strncpy(pExpr->colInfo.name, pSchema->name, TSDB_COL_NAME_LEN);
+ tstrncpy(pExpr->colInfo.name, pSchema->name, sizeof(pExpr->colInfo.name));
}
}
@@ -1666,7 +1647,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
assert(pTableMetaInfo != NULL);
if (name != NULL) {
- strncpy(pTableMetaInfo->name, name, TSDB_TABLE_ID_LEN);
+ tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
}
pTableMetaInfo->pTableMeta = pTableMeta;
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index ec4e544e18..ea0eb9ff29 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -31,7 +31,7 @@ extern "C" {
do { \
VarDataLenT __len = strlen(str); \
*(VarDataLenT *)(x) = __len; \
- strncpy(varDataVal(x), (str), __len); \
+ memcpy(varDataVal(x), (str), __len); \
} while (0);
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
@@ -43,7 +43,7 @@ extern "C" {
#define STR_WITH_SIZE_TO_VARSTR(x, str, _size) \
do { \
*(VarDataLenT *)(x) = (_size); \
- strncpy(varDataVal(x), (str), (_size)); \
+ memcpy(varDataVal(x), (str), (_size)); \
} while (0);
// ----------------- TSDB COLUMN DEFINITION
@@ -69,7 +69,8 @@ typedef struct {
int version; // version
int numOfCols; // Number of columns appended
int tlen; // maximum length of a SDataRow without the header part
- int flen; // First part length in a SDataRow after the header part
+ uint16_t flen; // First part length in a SDataRow after the header part
+ uint16_t vlen; // pure value part length, excluded the overhead
STColumn columns[];
} STSchema;
@@ -77,6 +78,7 @@ typedef struct {
#define schemaVersion(s) ((s)->version)
#define schemaTLen(s) ((s)->tlen)
#define schemaFLen(s) ((s)->flen)
+#define schemaVLen(s) ((s)->vlen)
#define schemaColAt(s, i) ((s)->columns + i)
#define tdFreeSchema(s) tfree((s))
@@ -105,7 +107,8 @@ typedef struct {
int tCols;
int nCols;
int tlen;
- int flen;
+ uint16_t flen;
+ uint16_t vlen;
int version;
STColumn *columns;
} STSchemaBuilder;
@@ -122,16 +125,16 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
* |<--------------------+--------------------------- len ---------------------------------->|
* |<-- Head -->|<--------- flen -------------->| |
* +---------------------+---------------------------------+---------------------------------+
- * | int16_t | int16_t | | |
+ * | uint16_t | int16_t | | |
* +----------+----------+---------------------------------+---------------------------------+
* | len | sversion | First part | Second part |
* +----------+----------+---------------------------------+---------------------------------+
*/
typedef void *SDataRow;
-#define TD_DATA_ROW_HEAD_SIZE sizeof(int16_t)*2
+#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
-#define dataRowLen(r) (*(int16_t *)(r))
+#define dataRowLen(r) (*(uint16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r)))
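For the two macros changed above, strlen or the caller-supplied _size already give the exact payload length, and a varstr cell never stores a terminating NUL, which is why memcpy replaces strncpy. A reading aid, with VarDataLenT assumed here to be a 16-bit length type:

    #include <stdint.h>
    #include <string.h>

    typedef uint16_t VarDataLenT;   /* assumption for this sketch */

    /* A varstr cell: length prefix immediately followed by the raw bytes,
     * no terminating NUL, so exactly len bytes are copied. */
    static void str_to_varstr_sketch(void *x, const char *str) {
      VarDataLenT len = (VarDataLenT)strlen(str);
      memcpy(x, &len, sizeof(len));
      memcpy((char *)x + sizeof(len), str, len);
    }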
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 39144c3083..eaceea468b 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -53,6 +53,7 @@ extern int64_t tsMsPerDay[3];
extern char tsFirst[];
extern char tsSecond[];
+extern char tsLocalFqdn[];
extern char tsLocalEp[];
extern uint16_t tsServerPort;
extern uint16_t tsDnodeShellPort;
@@ -169,6 +170,7 @@ extern char gitinfo[];
extern char gitinfoOfInternal[];
extern char buildinfo[];
+extern int8_t tsDaylight;
extern char tsTimezone[64];
extern char tsLocale[64];
extern char tsCharset[64]; // default encode string
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 810d7f492c..d2008c9ff8 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -23,5 +23,8 @@ void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
+SSchema tGetTableNameColumnSchema();
+
+bool tscValidateTableNameLength(size_t len);
#endif // TDENGINE_NAME_H
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 7880a4b302..77e91acc14 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -100,6 +100,7 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version) {
pBuilder->nCols = 0;
pBuilder->tlen = 0;
pBuilder->flen = 0;
+ pBuilder->vlen = 0;
pBuilder->version = version;
}
@@ -124,10 +125,12 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int3
if (IS_VAR_DATA_TYPE(type)) {
colSetBytes(pCol, bytes);
- pBuilder->tlen += (TYPE_BYTES[type] + sizeof(VarDataLenT) + bytes);
+ pBuilder->tlen += (TYPE_BYTES[type] + bytes);
+ pBuilder->vlen += bytes - sizeof(VarDataLenT);
} else {
colSetBytes(pCol, TYPE_BYTES[type]);
pBuilder->tlen += TYPE_BYTES[type];
+ pBuilder->vlen += TYPE_BYTES[type];
}
pBuilder->nCols++;
@@ -150,6 +153,7 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
schemaNCols(pSchema) = pBuilder->nCols;
schemaTLen(pSchema) = pBuilder->tlen;
schemaFLen(pSchema) = pBuilder->flen;
+ schemaVLen(pSchema) = pBuilder->vlen;
memcpy(schemaColAt(pSchema, 0), pBuilder->columns, sizeof(STColumn) * pBuilder->nCols);
@@ -197,10 +201,10 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
pDataCol->len = 0;
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
- pDataCol->spaceSize = (sizeof(VarDataLenT) + pDataCol->bytes) * maxPoints;
pDataCol->dataOff = (VarDataOffsetT *)(*pBuf);
- pDataCol->pData = POINTER_SHIFT(*pBuf, TYPE_BYTES[pDataCol->type] * maxPoints);
- *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize + TYPE_BYTES[pDataCol->type] * maxPoints);
+ pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints);
+ pDataCol->spaceSize = pDataCol->bytes * maxPoints;
+ *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize + sizeof(VarDataOffsetT) * maxPoints);
} else {
pDataCol->spaceSize = pDataCol->bytes * maxPoints;
pDataCol->dataOff = NULL;
@@ -269,8 +273,7 @@ void dataColSetNullAt(SDataCol *pCol, int index) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
- varDataLen(ptr) = (pCol->type == TSDB_DATA_TYPE_BINARY) ? sizeof(char) : TSDB_NCHAR_SIZE;
- setNull(varDataVal(ptr), pCol->type, pCol->bytes);
+ setVardataNull(ptr, pCol->type);
pCol->len += varDataTLen(ptr);
} else {
setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index e010f7fe56..e80597ca95 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -65,6 +65,7 @@ int64_t tsMsPerDay[] = {86400000L, 86400000000L, 86400000000000L};
char tsFirst[TSDB_EP_LEN] = {0};
char tsSecond[TSDB_EP_LEN] = {0};
char tsArbitrator[TSDB_EP_LEN] = {0};
+char tsLocalFqdn[TSDB_FQDN_LEN] = {0};
char tsLocalEp[TSDB_EP_LEN] = {0}; // Local End Point, hostname:port
uint16_t tsServerPort = 6030;
uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035]
@@ -197,6 +198,7 @@ char tsMonitorDbName[TSDB_DB_NAME_LEN] = "log";
char tsInternalPass[] = "secretkey";
int32_t tsMonitorInterval = 30; // seconds
+int8_t tsDaylight = 0;
char tsTimezone[64] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
@@ -305,6 +307,16 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "fqdn";
+ cfg.ptr = tsLocalFqdn;
+ cfg.valType = TAOS_CFG_VTYPE_STRING;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 0;
+ cfg.ptrLength = TSDB_FQDN_LEN;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// port
cfg.option = "serverPort";
cfg.ptr = &tsServerPort;
@@ -708,7 +720,7 @@ static void doInitGlobalConfig() {
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
- cfg.ptrLength = TSDB_DB_NAME_LEN;
+ cfg.ptrLength = TSDB_DB_NAME_LEN - 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -718,7 +730,7 @@ static void doInitGlobalConfig() {
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
- cfg.ptrLength = TSDB_USER_LEN;
+ cfg.ptrLength = TSDB_USER_LEN - 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -728,7 +740,7 @@ static void doInitGlobalConfig() {
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0;
cfg.maxValue = 0;
- cfg.ptrLength = TSDB_PASSWORD_LEN;
+ cfg.ptrLength = TSDB_PASSWORD_LEN - 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -923,7 +935,7 @@ static void doInitGlobalConfig() {
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 0;
- cfg.ptrLength = TSDB_DB_NAME_LEN;
+ cfg.ptrLength = TSDB_DB_NAME_LEN - 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -1251,9 +1263,14 @@ bool taosCheckGlobalCfg() {
taosSetAllDebugFlag();
}
- taosGetFqdn(tsLocalEp);
- sprintf(tsLocalEp + strlen(tsLocalEp), ":%d", tsServerPort);
- uPrint("localEp is %s", tsLocalEp);
+ if (tsLocalFqdn[0] == 0) {
+ taosGetFqdn(tsLocalFqdn);
+ }
+
+ strcpy(tsLocalEp, tsLocalFqdn);
+
+ snprintf(tsLocalEp + strlen(tsLocalEp), sizeof(tsLocalEp) - strlen(tsLocalEp), ":%d", tsServerPort);
+ uPrint("localEp is: %s", tsLocalEp);
if (tsFirst[0] == 0) {
strcpy(tsFirst, tsLocalEp);
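The new fqdn option above lets the local endpoint be built as "<fqdn>:<port>" instead of always deriving the host name. A minimal sketch of that composition; the buffer size is a placeholder, not the real TSDB_EP_LEN:

    #include <stdio.h>

    enum { EP_LEN = 72 };   /* placeholder; the real bound is TSDB_EP_LEN */

    /* snprintf bounds the whole write and always terminates the buffer. */
    static void build_local_ep(char ep[EP_LEN], const char *fqdn, unsigned short port) {
      snprintf(ep, EP_LEN, "%s:%u", fqdn, port);
    }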
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 3566f26abd..2514ed26e5 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -28,8 +28,7 @@ void extractTableName(const char* tableId, char* name) {
size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]);
size_t s2 = strcspn(&tableId[s1 + 1], &TS_PATH_DELIMITER[0]);
- strncpy(name, &tableId[s1 + s2 + 2], TSDB_TABLE_NAME_LEN);
- name[TSDB_TABLE_NAME_LEN] = 0;
+ tstrncpy(name, &tableId[s1 + s2 + 2], TSDB_TABLE_NAME_LEN);
}
char* extractDBName(const char* tableId, char* name) {
@@ -38,3 +37,16 @@ char* extractDBName(const char* tableId, char* name) {
return strncpy(name, &tableId[offset1 + 1], len);
}
+
+SSchema tGetTableNameColumnSchema() {
+ SSchema s = {0};
+ s.bytes = TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE;
+ s.type = TSDB_DATA_TYPE_BINARY;
+ s.colId = TSDB_TBNAME_COLUMN_INDEX;
+ strncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
+ return s;
+}
+
+bool tscValidateTableNameLength(size_t len) {
+ return len < TSDB_TABLE_NAME_LEN;
+}
\ No newline at end of file
diff --git a/src/common/src/ttimezone.c b/src/common/src/ttimezone.c
index 0e8e1316b3..ae6ffea59a 100644
--- a/src/common/src/ttimezone.c
+++ b/src/common/src/ttimezone.c
@@ -58,6 +58,7 @@ void tsSetTimeZone() {
* (BST, +0100)
*/
sprintf(tsTimezone, "(%s, %s%02d00)", tzname[daylight], tz >= 0 ? "+" : "-", abs(tz));
+ tsDaylight = daylight;
uPrint("timezone format changed to %s", tsTimezone);
}
diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py
index 10df10b31f..c26e5c0967 100644
--- a/src/connector/python/linux/python2/taos/cinterface.py
+++ b/src/connector/python/linux/python2/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py
index c3c8a4603a..c9d0551af5 100644
--- a/src/connector/python/linux/python3/taos/cinterface.py
+++ b/src/connector/python/linux/python3/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
@@ -226,8 +226,8 @@ class CTaosInterface(object):
if connection.value == None:
print('connect to TDengine failed')
# sys.exit(1)
- else:
- print('connect to TDengine success')
+ #else:
+ # print('connect to TDengine success')
return connection
@@ -236,7 +236,7 @@ class CTaosInterface(object):
'''Close the TDengine handle
'''
CTaosInterface.libtaos.taos_close(connection)
- print('connection is closed')
+ #print('connection is closed')
@staticmethod
def query(connection, sql):
diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py
index 06ade4fc35..6a9c5bfcef 100644
--- a/src/connector/python/windows/python2/taos/cinterface.py
+++ b/src/connector/python/windows/python2/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py
index c6218fe9d4..fa7124431c 100644
--- a/src/connector/python/windows/python3/taos/cinterface.py
+++ b/src/connector/python/windows/python3/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c
index edb588b554..13f3c0a2b0 100644
--- a/src/cq/src/cqMain.c
+++ b/src/cq/src/cqMain.c
@@ -73,8 +73,8 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
return NULL;
}
- strcpy(pContext->user, pCfg->user);
- strcpy(pContext->pass, pCfg->pass);
+ tstrncpy(pContext->user, pCfg->user, sizeof(pContext->user));
+ tstrncpy(pContext->pass, pCfg->pass, sizeof(pContext->pass));
const char* db = pCfg->db;
for (const char* p = db; *p != 0; p++) {
if (*p == '.') {
@@ -82,7 +82,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
break;
}
}
- strcpy(pContext->db, db);
+ tstrncpy(pContext->db, db, sizeof(pContext->db));
pContext->vgId = pCfg->vgId;
pContext->cqWrite = pCfg->cqWrite;
pContext->ahandle = ahandle;
@@ -215,7 +215,7 @@ void cqDrop(void *handle) {
cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
free(pObj);
- pthread_mutex_lock(&pContext->mutex);
+ pthread_mutex_unlock(&pContext->mutex);
}
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
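The cqDrop hunk above is a lock/unlock pairing fix: the function enters its critical section with pthread_mutex_lock, so ending it with a second lock instead of an unlock would block the next caller forever. The corrected shape, reduced to its essentials:

    #include <pthread.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    static void drop_object(void) {
      pthread_mutex_lock(&mtx);
      /* ... unlink and free the object under the lock ... */
      pthread_mutex_unlock(&mtx);   /* previously a second lock by mistake */
    }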
diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c
index 73bc2923b2..91f146dde4 100644
--- a/src/dnode/src/dnodeMain.c
+++ b/src/dnode/src/dnodeMain.c
@@ -61,12 +61,10 @@ static const SDnodeComponent tsDnodeComponents[] = {
};
static int dnodeCreateDir(const char *dir) {
- struct stat dirstat;
- if (stat(dir, &dirstat) < 0) {
- if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
- return -1;
- }
+ if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
+ return -1;
}
+
return 0;
}
diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c
index 4b448837fe..d35e82fa47 100644
--- a/src/dnode/src/dnodeMgmt.c
+++ b/src/dnode/src/dnodeMgmt.c
@@ -177,10 +177,12 @@ void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
memcpy(item, pMsg, sizeof(SRpcMsg));
taosWriteQitem(tsMgmtQueue, 1, item);
} else {
- SRpcMsg rsp;
- rsp.handle = pMsg->handle;
- rsp.pCont = NULL;
- rsp.code = TSDB_CODE_DND_OUT_OF_MEMORY;
+ SRpcMsg rsp = {
+ .handle = pMsg->handle,
+ .pCont = NULL,
+ .code = TSDB_CODE_DND_OUT_OF_MEMORY
+ };
+
rpcSendResponse(&rsp);
rpcFreeCont(pMsg->pCont);
}
@@ -188,9 +190,9 @@ void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
static void *dnodeProcessMgmtQueue(void *param) {
SRpcMsg *pMsg;
- SRpcMsg rsp;
+ SRpcMsg rsp = {0};
int type;
- void *handle;
+ void * handle;
while (1) {
if (taosReadQitemFromQset(tsMgmtQset, &type, (void **) &pMsg, &handle) == 0) {
@@ -251,6 +253,7 @@ static int32_t dnodeOpenVnodes() {
if (status != TSDB_CODE_SUCCESS) {
dPrint("Get dnode list failed");
+ free(vnodeList);
return status;
}
@@ -290,6 +293,7 @@ static void dnodeCloseVnodes() {
if (status != TSDB_CODE_SUCCESS) {
dPrint("Get dnode list failed");
+ free(vnodeList);
return;
}
@@ -410,15 +414,35 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
dnodeProcessModuleStatus(pCfg->moduleStatus);
dnodeUpdateDnodeCfg(pCfg);
+
dnodeUpdateMnodeInfos(pMnodes);
taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer);
}
+static bool dnodeCheckMnodeInfos(SDMMnodeInfos *pMnodes) {
+ if (pMnodes->nodeNum <= 0 || pMnodes->nodeNum > 3) {
+ dError("invalid mnode infos, num:%d", pMnodes->nodeNum);
+ return false;
+ }
+
+ for (int32_t i = 0; i < pMnodes->nodeNum; ++i) {
+ SDMMnodeInfo *pMnodeInfo = &pMnodes->nodeInfos[i];
+ if (pMnodeInfo->nodeId <= 0 || strlen(pMnodeInfo->nodeEp) <= 5) {
+ dError("invalid mnode info:%d, nodeId:%d nodeEp:%s", i, pMnodeInfo->nodeId, pMnodeInfo->nodeEp);
+ return false;
+ }
+ }
+
+ return true;
+}
+
static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) {
bool mnodesChanged = (memcmp(&tsDMnodeInfos, pMnodes, sizeof(SDMMnodeInfos)) != 0);
bool mnodesNotInit = (tsDMnodeInfos.nodeNum == 0);
if (!(mnodesChanged || mnodesNotInit)) return;
+ if (!dnodeCheckMnodeInfos(pMnodes)) return;
+
memcpy(&tsDMnodeInfos, pMnodes, sizeof(SDMMnodeInfos));
dPrint("mnode infos is changed, nodeNum:%d inUse:%d", tsDMnodeInfos.nodeNum, tsDMnodeInfos.inUse);
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
@@ -456,6 +480,7 @@ static bool dnodeReadMnodeInfos() {
return false;
}
+ content[len] = 0;
cJSON* root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read mnodeIpList.json, invalid json format");
@@ -547,6 +572,7 @@ static void dnodeSaveMnodeInfos() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
+ fflush(fp);
fclose(fp);
free(content);
@@ -628,6 +654,7 @@ static bool dnodeReadDnodeCfg() {
return false;
}
+ content[len] = 0;
cJSON* root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read dnodeCfg.json, invalid json format");
@@ -668,6 +695,7 @@ static void dnodeSaveDnodeCfg() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
+ fflush(fp);
fclose(fp);
free(content);
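A robustness pattern recurs in the dnodeMgmt.c hunks above: the buffer read from disk gets an explicit terminator (content[len] = 0) before it is handed to cJSON_Parse, since fread does not NUL-terminate. A hedged sketch of that read path, with a hypothetical helper name:

    #include <stdio.h>
    #include <stdlib.h>
    #include "cJSON.h"

    /* Hypothetical helper: read a whole JSON file and parse it.
     * The explicit content[len] = 0 is what the diff adds. */
    static cJSON *read_json_file(const char *path, size_t maxLen) {
      char *content = calloc(1, maxLen + 1);
      if (content == NULL) return NULL;

      FILE *fp = fopen(path, "r");
      if (fp == NULL) { free(content); return NULL; }

      size_t len = fread(content, 1, maxLen, fp);
      fclose(fp);

      content[len] = 0;                /* fread does not terminate */
      cJSON *root = cJSON_Parse(content);
      free(content);
      return root;
    }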
diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c
index eb017c335e..bd5f3208ee 100644
--- a/src/dnode/src/dnodePeer.c
+++ b/src/dnode/src/dnodePeer.c
@@ -83,10 +83,13 @@ void dnodeCleanupServer() {
}
static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
- SRpcMsg rspMsg;
- rspMsg.handle = pMsg->handle;
- rspMsg.pCont = NULL;
- rspMsg.contLen = 0;
+ SRpcMsg rspMsg = {
+ .handle = pMsg->handle,
+ .pCont = NULL,
+ .contLen = 0
+ };
+
+ if (pMsg->pCont == NULL) return;
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) {
rspMsg.code = TSDB_CODE_RPC_NOT_READY;
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index 8eba1f3775..4252e63f8d 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -19,6 +19,7 @@
#include "taosdef.h"
#include "taosmsg.h"
#include "tglobal.h"
+#include "tutil.h"
#include "http.h"
#include "mnode.h"
#include "dnode.h"
@@ -108,10 +109,13 @@ void dnodeCleanupShell() {
}
void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
- SRpcMsg rpcMsg;
- rpcMsg.handle = pMsg->handle;
- rpcMsg.pCont = NULL;
- rpcMsg.contLen = 0;
+ SRpcMsg rpcMsg = {
+ .handle = pMsg->handle,
+ .pCont = NULL,
+ .contLen = 0
+ };
+
+ if (pMsg->pCont == NULL) return;
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) {
dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]);
@@ -143,7 +147,7 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
if (code != TSDB_CODE_RPC_NOT_READY) return code;
SDMAuthMsg *pMsg = rpcMallocCont(sizeof(SDMAuthMsg));
- strcpy(pMsg->user, user);
+ tstrncpy(pMsg->user, user, sizeof(pMsg->user));
SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pMsg;
@@ -201,7 +205,7 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid) {
int16_t numOfTags = htons(pTable->numOfTags);
int32_t sid = htonl(pTable->sid);
uint64_t uid = htobe64(pTable->uid);
- dPrint("table:%s, numOfColumns:%d numOfTags:%d sid:%d uid:%d", pTable->tableId, numOfColumns, numOfTags, sid, uid);
+ dPrint("table:%s, numOfColumns:%d numOfTags:%d sid:%d uid:%" PRIu64, pTable->tableId, numOfColumns, numOfTags, sid, uid);
return rpcRsp.pCont;
}
diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c
index 0a983362c2..2fa738e480 100644
--- a/src/dnode/src/dnodeSystem.c
+++ b/src/dnode/src/dnodeSystem.c
@@ -29,11 +29,11 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
- if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
printf("config file path overflow");
exit(EXIT_FAILURE);
}
- strcpy(configDir, argv[i]);
+ tstrncpy(configDir, argv[i], TSDB_FILENAME_LEN);
} else {
printf("'-c' requires a parameter, default:%s\n", configDir);
exit(EXIT_FAILURE);
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index 2f9e9a0af9..cd18ae6dda 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -26,13 +26,6 @@
#include "dnodeVRead.h"
#include "vnode.h"
-typedef struct {
- SRspRet rspRet;
- void *pCont;
- int32_t contLen;
- SRpcMsg rpcMsg;
-} SReadMsg;
-
typedef struct {
pthread_t thread; // thread
int32_t workerId; // worker ID
@@ -218,7 +211,7 @@ static void *dnodeProcessReadQueue(void *param) {
}
dTrace("%p, msg:%s will be processed in vread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]);
- int32_t code = vnodeProcessRead(pVnode, pReadMsg->rpcMsg.msgType, pReadMsg->pCont, pReadMsg->contLen, &pReadMsg->rspRet);
+ int32_t code = vnodeProcessRead(pVnode, pReadMsg);
dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
taosFreeQitem(pReadMsg);
}
diff --git a/src/inc/query.h b/src/inc/query.h
index cdadd4759f..49ee5248f5 100644
--- a/src/inc/query.h
+++ b/src/inc/query.h
@@ -70,13 +70,20 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo);
int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp** pRsp, int32_t* contLen);
/**
- * Decide if more results will be produced or not
+ * Decide if more results will be produced or not. NOTE: this function will increase the ref count of QInfo,
+ * so it can only be called once for each retrieve
*
* @param qinfo
* @return
*/
bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
+/**
+ * kill the current ongoing query and automatically free the query handle
+ * @param qinfo
+ */
+int32_t qKillQuery(qinfo_t qinfo);
+
#ifdef __cplusplus
}
#endif
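Given the two declarations above, a hedged usage sketch (assuming qinfo_t is the opaque handle type from this header; this is not code from the repository): qHasMoreResultsToRetrieve is asked at most once per completed retrieve because it takes a reference on the QInfo, and qKillQuery both stops the query and releases the handle, so the handle must not be touched afterwards.

    #include <stdbool.h>
    #include "query.h"

    static void after_retrieve(qinfo_t qinfo, bool shuttingDown) {
      if (shuttingDown) {
        qKillQuery(qinfo);                    /* also frees the handle */
        return;
      }
      if (qHasMoreResultsToRetrieve(qinfo)) { /* call at most once per retrieve */
        /* schedule the next retrieve of qinfo here */
      }
    }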
diff --git a/src/inc/taos.h b/src/inc/taos.h
index 947cbe6759..bf6942be8b 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -53,9 +53,9 @@ typedef enum {
} TSDB_OPTION;
typedef struct taosField {
- char name[64];
- short bytes;
+ char name[65];
uint8_t type;
+ short bytes;
} TAOS_FIELD;
#ifdef _TD_GO_DLL_
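The member order of TAOS_FIELD now matters to every binding: the Python ctypes Structure in the four cinterface.py hunks earlier in this diff mirrors it field-for-field (name, type, bytes). A quick self-check of the new layout, illustrative and not part of the commit:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct taosField {
      char    name[65];
      uint8_t type;
      short   bytes;
    } TAOS_FIELD;

    int main(void) {
      /* Prints the offsets the connectors must reproduce. */
      printf("name@%zu type@%zu bytes@%zu size=%zu\n",
             offsetof(TAOS_FIELD, name), offsetof(TAOS_FIELD, type),
             offsetof(TAOS_FIELD, bytes), sizeof(TAOS_FIELD));
      return 0;
    }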
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index c75fc70d75..7490de90d0 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -186,30 +186,31 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_BINARY_OP_MULTIPLY 14
#define TSDB_BINARY_OP_DIVIDE 15
#define TSDB_BINARY_OP_REMAINDER 16
-#define TSDB_USERID_LEN 9
#define TS_PATH_DELIMITER_LEN 1
-#define TSDB_METER_ID_LEN_MARGIN 10
-#define TSDB_TABLE_ID_LEN (TSDB_DB_NAME_LEN+TSDB_TABLE_NAME_LEN+2*TS_PATH_DELIMITER_LEN+TSDB_USERID_LEN+TSDB_METER_ID_LEN_MARGIN) //TSDB_DB_NAME_LEN+TSDB_TABLE_NAME_LEN+2*strlen(TS_PATH_DELIMITER)+strlen(USERID)
#define TSDB_UNI_LEN 24
#define TSDB_USER_LEN TSDB_UNI_LEN
-#define TSDB_ACCT_LEN TSDB_UNI_LEN
+// ACCOUNT is a 32 bit positive integer
+// this is the length of its string representation
+// including the terminator zero
+#define TSDB_ACCT_LEN 11
#define TSDB_PASSWORD_LEN TSDB_UNI_LEN
#define TSDB_MAX_COLUMNS 1024
#define TSDB_MIN_COLUMNS 2 //PRIMARY COLUMN(timestamp) + other columns
#define TSDB_NODE_NAME_LEN 64
-#define TSDB_TABLE_NAME_LEN 192
-#define TSDB_DB_NAME_LEN 32
-#define TSDB_COL_NAME_LEN 64
+#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
+#define TSDB_DB_NAME_LEN 33
+#define TSDB_TABLE_ID_LEN (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN)
+#define TSDB_COL_NAME_LEN 65
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_SQL_SHOW_LEN 256
-#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 6mb
+#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 8mb
-#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 64
-#define TSDB_MAX_TAGS_LEN 65536
+#define TSDB_MAX_BYTES_PER_ROW 65535
+#define TSDB_MAX_TAGS_LEN 65535
#define TSDB_MAX_TAGS 128
#define TSDB_AUTH_LEN 16
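A reading aid for the constant changes above: the *_LEN values now count the terminating zero, which is why the message structs in taosmsg.h below drop their former "+ 1" and why call sites can bound copies with sizeof on the destination. Illustrative only:

    #include <string.h>

    #define DB_NAME_LEN 33            /* stands in for TSDB_DB_NAME_LEN */

    struct db_ref {
      char name[DB_NAME_LEN];         /* no "+ 1": the terminator is counted */
    };

    static void set_db(struct db_ref *d, const char *name) {
      strncpy(d->name, name, sizeof(d->name) - 1);
      d->name[sizeof(d->name) - 1] = '\0';
    }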
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 5379b371ef..ac2af75742 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -71,6 +71,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_COM_OPS_NOT_SUPPORT, 0, 0x0100, "operations
TAOS_DEFINE_ERROR(TSDB_CODE_COM_MEMORY_CORRUPTED, 0, 0x0101, "memory corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_COM_OUT_OF_MEMORY, 0, 0x0102, "out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_COM_INVALID_CFG_MSG, 0, 0x0103, "invalid config message")
+TAOS_DEFINE_ERROR(TSDB_CODE_COM_FILE_CORRUPTED, 0, 0x0104, "file is corrupted")
//client
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_SQL, 0, 0x0200, "invalid sql")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 88c6f9cf26..8e732f0cac 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -224,7 +224,7 @@ typedef struct {
typedef struct SSchema {
uint8_t type;
- char name[TSDB_COL_NAME_LEN + 1];
+ char name[TSDB_COL_NAME_LEN];
int16_t colId;
int16_t bytes;
} SSchema;
@@ -243,14 +243,14 @@ typedef struct {
uint64_t uid;
uint64_t superTableUid;
uint64_t createdTime;
- char tableId[TSDB_TABLE_ID_LEN + 1];
- char superTableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
+ char superTableId[TSDB_TABLE_ID_LEN];
char data[];
} SMDCreateTableMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
- char db[TSDB_DB_NAME_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
+ char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
int8_t igExists;
int8_t getMeta;
int16_t numOfTags;
@@ -262,13 +262,13 @@ typedef struct {
} SCMCreateTableMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
int8_t igNotExists;
} SCMDropTableMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
- char db[TSDB_DB_NAME_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
+ char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
int16_t type; /* operation type */
int16_t numOfCols; /* number of schema */
int32_t tagValLen;
@@ -292,11 +292,11 @@ typedef struct {
typedef struct {
char clientVersion[TSDB_VERSION_LEN];
char msgVersion[TSDB_VERSION_LEN];
- char db[TSDB_TABLE_ID_LEN + 1];
+ char db[TSDB_TABLE_ID_LEN];
} SCMConnectMsg;
typedef struct {
- char acctId[TSDB_ACCT_LEN + 1];
+ char acctId[TSDB_ACCT_LEN];
char serverVersion[TSDB_VERSION_LEN];
int8_t writeAuth;
int8_t superAuth;
@@ -321,18 +321,18 @@ typedef struct {
} SAcctCfg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
SAcctCfg cfg;
} SCMCreateAcctMsg, SCMAlterAcctMsg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
} SCMDropUserMsg, SCMDropAcctMsg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
int8_t privilege;
int8_t flag;
} SCMCreateUserMsg, SCMAlterUserMsg;
@@ -342,14 +342,14 @@ typedef struct {
int32_t vgId;
int32_t sid;
uint64_t uid;
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
} SMDDropTableMsg;
typedef struct {
int32_t contLen;
int32_t vgId;
uint64_t uid;
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
} SMDDropSTableMsg;
typedef struct {
@@ -501,8 +501,7 @@ typedef struct {
} SVnodeLoad;
typedef struct {
- char acct[TSDB_USER_LEN + 1];
- char db[TSDB_DB_NAME_LEN + 1];
+ char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
int32_t cacheBlockSize; //MB
int32_t totalBlocks;
int32_t maxTables;
@@ -521,7 +520,7 @@ typedef struct {
} SCMCreateDbMsg, SCMAlterDbMsg;
typedef struct {
- char db[TSDB_TABLE_ID_LEN + 1];
+ char db[TSDB_TABLE_ID_LEN];
uint8_t ignoreNotExists;
} SCMDropDbMsg, SCMUseDbMsg;
@@ -606,13 +605,13 @@ typedef struct {
} SMDVnodeDesc;
typedef struct {
- char db[TSDB_DB_NAME_LEN + 1];
+ char db[TSDB_DB_NAME_LEN];
SMDVnodeCfg cfg;
SMDVnodeDesc nodes[TSDB_MAX_REPLICA];
} SMDCreateVnodeMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
int16_t createFlag;
char tags[];
} SCMTableInfoMsg;
@@ -639,7 +638,7 @@ typedef struct {
typedef struct STableMetaMsg {
int32_t contLen;
- char tableId[TSDB_TABLE_ID_LEN + 1]; // table id
+ char tableId[TSDB_TABLE_ID_LEN]; // table id
uint8_t numOfTags;
uint8_t precision;
uint8_t tableType;
@@ -660,7 +659,7 @@ typedef struct SMultiTableMeta {
typedef struct {
int32_t dataLen;
- char name[TSDB_TABLE_ID_LEN + 1];
+ char name[TSDB_TABLE_ID_LEN];
char data[TSDB_MAX_TAGS_LEN];
} STagData;
@@ -671,7 +670,7 @@ typedef struct {
*/
typedef struct {
int8_t type;
- char db[TSDB_DB_NAME_LEN + 1];
+ char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
uint16_t payloadLen;
char payload[];
} SCMShowMsg;
@@ -746,15 +745,15 @@ typedef struct {
uint64_t uid;
uint64_t stime; // stream starting time
int32_t status;
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
} SMDAlterStreamMsg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
char spi;
char encrypt;
- char secret[TSDB_KEY_LEN + 1];
- char ckey[TSDB_KEY_LEN + 1];
+ char secret[TSDB_KEY_LEN];
+ char ckey[TSDB_KEY_LEN];
} SDMAuthMsg, SDMAuthRsp;
#pragma pack(pop)
diff --git a/src/inc/tcq.h b/src/inc/tcq.h
index 9d987da468..32b75674c3 100644
--- a/src/inc/tcq.h
+++ b/src/inc/tcq.h
@@ -27,7 +27,7 @@ typedef struct {
int vgId;
char user[TSDB_USER_LEN];
char pass[TSDB_PASSWORD_LEN];
- char db[TSDB_DB_NAME_LEN + 1];
+ char db[TSDB_DB_NAME_LEN];
FCqWrite cqWrite;
} SCqCfg;
diff --git a/src/inc/trpc.h b/src/inc/trpc.h
index 5c5c77c251..6c5d7fa889 100644
--- a/src/inc/trpc.h
+++ b/src/inc/trpc.h
@@ -78,11 +78,13 @@ void rpcClose(void *);
void *rpcMallocCont(int contLen);
void rpcFreeCont(void *pCont);
void *rpcReallocCont(void *ptr, int contLen);
-void rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg);
+void *rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg);
void rpcSendResponse(const SRpcMsg *pMsg);
void rpcSendRedirectRsp(void *pConn, const SRpcIpSet *pIpSet);
int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pReq, SRpcMsg *pRsp);
+int rpcReportProgress(void *pConn, char *pCont, int contLen);
+void rpcCanelRequest(void *pContext);
#ifdef __cplusplus
}
diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h
index a678f213bb..add85d9438 100644
--- a/src/inc/tsdb.h
+++ b/src/inc/tsdb.h
@@ -65,6 +65,13 @@ typedef struct {
int8_t compression;
} STsdbCfg;
+// --------- TSDB REPOSITORY USAGE STATISTICS
+typedef struct {
+ int64_t totalStorage; // total bytes occupied
+ int64_t compStorage;
+ int64_t pointsWritten; // total data points written
+} STsdbStat;
+
typedef void TsdbRepoT; // use void to hide implementation details from outside
void tsdbSetDefaultCfg(STsdbCfg *pCfg);
@@ -306,6 +313,15 @@ int32_t tsdbGetOneTableGroup(TsdbRepoT *tsdb, uint64_t uid, STableGroupInfo *pGr
*/
void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle);
+/**
+ * get the statistics of repo usage
+ * @param repo. pointer to the tsdb repository
+ * @param totalPoints. total data points written
+ * @param totalStorage. total bytes taken by the tsdb
+ * @param compStorage. total bytes taken by the tsdb after compression
+ */
+void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 069f99263d..0da1f51e27 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -34,6 +34,13 @@ typedef struct {
void *qhandle; //used by query and retrieve msg
} SRspRet;
+typedef struct {
+ SRspRet rspRet;
+ void *pCont;
+ int32_t contLen;
+ SRpcMsg rpcMsg;
+} SReadMsg;
+
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeDrop(int32_t vgId);
int32_t vnodeOpen(int32_t vgId, char *rootDir);
@@ -52,7 +59,7 @@ void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
void vnodeBuildStatusMsg(void * param);
-int32_t vnodeProcessRead(void *pVnode, int msgType, void *pCont, int32_t contLen, SRspRet *ret);
+int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);
#ifdef __cplusplus
}
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
index 439ca6edad..1a75a2aa85 100644
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ -97,7 +97,7 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
}
} else if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
- if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
}
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index f04607f6fa..7705db4b27 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -469,7 +469,6 @@ static int dumpResultToFile(const char* fname, TAOS_RES* result) {
} while( row != NULL);
fclose(fp);
- taos_free_result(result);
return numOfRows;
}
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index 7d035126c0..829ceb9e5f 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -81,7 +81,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
fprintf(stderr, "Invalid path %s\n", arg);
return -1;
}
- if (strlen(full_path.we_wordv[0]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(full_path.we_wordv[0]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", full_path.we_wordv[0], TSDB_FILENAME_LEN - 1);
wordfree(&full_path);
return -1;
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index cf96bce5a8..8a7996d682 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -77,7 +77,7 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
}
} else if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
- if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
}
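The same argument check is tightened in shellDarwin.c, shellLinux.c, shellWindows.c and dnodeSystem.c: ">= TSDB_FILENAME_LEN" is equivalent to the old "> TSDB_FILENAME_LEN - 1" but states the intent against the buffer size directly. The predicate in isolation:

    #include <string.h>

    /* True when s plus its terminator fits in a bufLen-byte buffer. */
    static int path_fits(const char *s, size_t bufLen) {
      return strlen(s) < bufLen;
    }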
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index ca0af96145..ab5781e74f 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -297,7 +297,7 @@ void *deleteTable();
void *asyncWrite(void *sarg);
-void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary);
+int generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary);
void rand_string(char *str, int size);
@@ -710,7 +710,7 @@ void *readTable(void *sarg) {
int32_t code = taos_errno(pSql);
if (code != 0) {
- fprintf(stderr, "Failed to query:%s\n", taos_errstr(taos));
+ fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
exit(EXIT_FAILURE);
@@ -756,7 +756,7 @@ void *readMetric(void *sarg) {
for (int j = 0; j < n; j++) {
char condition[BUFFER_SIZE - 30] = "\0";
- char tempS[BUFFER_SIZE] = "\0";
+ char tempS[64] = "\0";
int m = 10 < num_of_tables ? 10 : num_of_tables;
@@ -779,7 +779,7 @@ void *readMetric(void *sarg) {
int32_t code = taos_errno(pSql);
if (code != 0) {
- fprintf(stderr, "Failed to query:%s\n", taos_errstr(taos));
+ fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
exit(1);
@@ -817,8 +817,10 @@ void queryDB(TAOS *taos, char *command) {
i--;
}
- if (i == 0) {
- fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(taos));
+ if (code != 0) {
+ fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
+ taos_free_result(pSql);
+
taos_close(taos);
exit(EXIT_FAILURE);
}
@@ -844,14 +846,19 @@ void *syncWrite(void *sarg) {
int k;
for (k = 0; k < winfo->nrecords_per_request;) {
int rand_num = rand() % 100;
- if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate)
- {
+ int len = -1;
+ if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) {
long d = tmp_time - rand() % 1000000 + rand_num;
- generateData(data, data_type, ncols_per_record, d, len_of_binary);
- } else
- {
- generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary);
+ len = generateData(data, data_type, ncols_per_record, d, len_of_binary);
+ } else {
+ len = generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary);
}
+
+ //assert(len + pstr - buffer < BUFFER_SIZE);
+ if (len + pstr - buffer >= BUFFER_SIZE) { // too long
+ break;
+ }
+
pstr += sprintf(pstr, " %s", data);
inserted++;
k++;
@@ -914,7 +921,7 @@ void callBack(void *param, TAOS_RES *res, int code) {
int64_t tmp_time = tb_info->timestamp;
if (code < 0) {
- fprintf(stderr, "failed to insert data %d:reason; %s\n", code, taos_errstr(tb_info->taos));
+ fprintf(stderr, "failed to insert data %d:reason; %s\n", code, taos_errstr(res));
exit(EXIT_FAILURE);
}
@@ -966,7 +973,7 @@ double getCurrentTime() {
return tv.tv_sec + tv.tv_usec / 1E6;
}
-void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) {
+int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) {
memset(res, 0, MAX_DATA_SIZE);
char *pstr = res;
pstr += sprintf(pstr, "(%" PRId64, timestamp);
@@ -1000,9 +1007,16 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam
rand_string(s, len_of_binary);
pstr += sprintf(pstr, ", \"%s\"", s);
}
+
+ if (pstr - res > MAX_DATA_SIZE) {
+ perror("column length too long, abort");
+ exit(-1);
+ }
}
pstr += sprintf(pstr, ")");
+
+ return pstr - res;
}
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";
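The syncWrite/generateData changes above follow a common pattern: the producer reports how many bytes it wrote so the caller can stop appending before the request buffer overflows. A stripped-down version of that pattern with made-up names:

    #include <string.h>

    #define REQ_BUF_SIZE 1024   /* illustrative, not taosdemo's BUFFER_SIZE */

    /* Appends rec if it fits (including the terminator); returns the new
     * used length, or the old one when the record would overflow. */
    static size_t append_record(char *buf, size_t used, const char *rec) {
      size_t len = strlen(rec);
      if (used + len + 1 > REQ_BUF_SIZE) return used;
      memcpy(buf + used, rec, len + 1);
      return used + len;
    }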
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index d96f80ed20..ee792c5116 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -88,21 +88,21 @@ enum _describe_table_index {
};
typedef struct {
- char field[TSDB_COL_NAME_LEN + 1];
+ char field[TSDB_COL_NAME_LEN];
char type[16];
int length;
char note[128];
} SColDes;
typedef struct {
- char name[TSDB_COL_NAME_LEN + 1];
+ char name[TSDB_COL_NAME_LEN];
SColDes cols[];
} STableDef;
extern char version[];
typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_DB_NAME_LEN];
int32_t replica;
int32_t days;
int32_t keep;
@@ -177,8 +177,8 @@ typedef struct SDumpArguments {
char *password;
uint16_t port;
// output file
- char output[TSDB_FILENAME_LEN + 1];
- char input[TSDB_FILENAME_LEN + 1];
+ char output[TSDB_FILENAME_LEN];
+ char input[TSDB_FILENAME_LEN];
char *encode;
// dump unit option
bool all_databases;
@@ -643,6 +643,8 @@ int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) {
lseek(fd, 0, SEEK_SET);
while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) {
+ tableRecord.name[sizeof(tableRecord.name) - 1] = 0;
+ tableRecord.metric[sizeof(tableRecord.metric) - 1] = 0;
taosDumpTable(tableRecord.name, tableRecord.metric, arguments, fp);
}
@@ -902,6 +904,8 @@ int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp) {
lseek(fd, 0, SEEK_SET);
while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) {
+ tableRecord.name[sizeof(tableRecord.name) - 1] = 0;
+ tableRecord.metric[sizeof(tableRecord.metric) - 1] = 0;
taosDumpTable(tableRecord.name, tableRecord.metric, arguments, fp);
}
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index cf9058b9cf..2baf28f88f 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -32,8 +32,8 @@ struct SMnodeObj;
typedef struct SDnodeObj {
int32_t dnodeId;
uint16_t dnodePort;
- char dnodeFqdn[TSDB_FQDN_LEN + 1];
- char dnodeEp[TSDB_EP_LEN + 1];
+ char dnodeFqdn[TSDB_FQDN_LEN];
+ char dnodeEp[TSDB_EP_LEN];
int64_t createdTime;
uint32_t lastAccess;
int32_t openVnodes;
@@ -115,7 +115,7 @@ typedef struct {
typedef struct SVgObj {
uint32_t vgId;
- char dbName[TSDB_DB_NAME_LEN + 1];
+ char dbName[TSDB_DB_NAME_LEN];
int64_t createdTime;
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
int32_t numOfVnodes;
@@ -154,8 +154,8 @@ typedef struct {
} SDbCfg;
typedef struct SDbObj {
- char name[TSDB_DB_NAME_LEN + 1];
- char acct[TSDB_USER_LEN + 1];
+ char name[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
+ char acct[TSDB_USER_LEN];
int64_t createdTime;
int32_t cfgVersion;
SDbCfg cfg;
@@ -172,9 +172,9 @@ typedef struct SDbObj {
} SDbObj;
typedef struct SUserObj {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
- char acct[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
+ char acct[TSDB_USER_LEN];
int64_t createdTime;
int8_t superAuth;
int8_t writeAuth;
@@ -203,8 +203,8 @@ typedef struct {
} SAcctInfo;
typedef struct SAcctObj {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
SAcctCfg cfg;
int32_t acctId;
int64_t createdTime;
@@ -219,7 +219,7 @@ typedef struct SAcctObj {
typedef struct {
int8_t type;
int32_t index;
- char db[TSDB_DB_NAME_LEN + 1];
+ char db[TSDB_DB_NAME_LEN];
void * pIter;
int16_t numOfColumns;
int32_t rowSize;
diff --git a/src/mnode/inc/mnodeProfile.h b/src/mnode/inc/mnodeProfile.h
index 30745db035..c9f7cc8e2a 100644
--- a/src/mnode/inc/mnodeProfile.h
+++ b/src/mnode/inc/mnodeProfile.h
@@ -22,7 +22,7 @@ extern "C" {
#include "mnodeDef.h"
typedef struct {
- char user[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
int8_t killed;
uint16_t port;
uint32_t ip;
diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c
index 90569b4a95..69783defc7 100644
--- a/src/mnode/src/mnodeDb.c
+++ b/src/mnode/src/mnodeDb.c
@@ -84,9 +84,12 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
mnodeDropAllChildTables(pDb);
mnodeDropAllSuperTables(pDb);
mnodeDropAllDbVgroups(pDb);
- mnodeDropDbFromAcct(pAcct, pDb);
- mnodeDecAcctRef(pAcct);
-
+
+ if (pAcct) {
+ mnodeDropDbFromAcct(pAcct, pDb);
+ mnodeDecAcctRef(pAcct);
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -328,8 +331,8 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate) {
if (code != 0) return code;
pDb = calloc(1, sizeof(SDbObj));
- strncpy(pDb->name, pCreate->db, TSDB_DB_NAME_LEN);
- strncpy(pDb->acct, pAcct->user, TSDB_USER_LEN);
+ tstrncpy(pDb->name, pCreate->db, sizeof(pDb->name));
+ tstrncpy(pDb->acct, pAcct->user, sizeof(pDb->acct));
pDb->createdTime = taosGetTimestampMs();
pDb->cfg = (SDbCfg) {
.cacheBlockSize = pCreate->cacheBlockSize,
@@ -373,7 +376,7 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate) {
}
bool mnodeCheckIsMonitorDB(char *db, char *monitordb) {
- char dbName[TSDB_DB_NAME_LEN + 1] = {0};
+ char dbName[TSDB_DB_NAME_LEN] = {0};
extractDBName(db, dbName);
size_t len = strlen(dbName);
@@ -453,7 +456,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
SUserObj *pUser = mnodeGetUserFromConn(pConn);
if (pUser == NULL) return 0;
- pShow->bytes[cols] = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE;
+ pShow->bytes[cols] = (TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
@@ -610,7 +613,7 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char* name = mnodeGetDbStr(pDb->name);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, name, TSDB_DB_NAME_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, name, TSDB_DB_NAME_LEN - 1);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -882,7 +885,7 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
}
if (pDb->cfg.replications - replications >= 2) {
- mError("db:%s, replica number can't change from 3 to 1", pDb->name, replications);
+ mError("db:%s, replica number can't change from %d to %d", pDb->name, pDb->cfg.replications, replications);
terrno = TSDB_CODE_MND_INVALID_DB_OPTION;
}
}
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 6fd565b5e6..d2af86badb 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -90,7 +90,7 @@ static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) {
static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) {
SDnodeObj *pDnode = pOper->pObj;
SDnodeObj *pSaved = mnodeGetDnode(pDnode->dnodeId);
- if (pDnode != pSaved) {
+ if (pDnode != pSaved && pDnode != NULL && pSaved != NULL) {
memcpy(pSaved, pDnode, pOper->rowSize);
free(pDnode);
}
@@ -237,7 +237,9 @@ void mnodeUpdateDnode(SDnodeObj *pDnode) {
.pObj = pDnode
};
- sdbUpdateRow(&oper);
+ if (sdbUpdateRow(&oper) != 0) {
+ mError("dnodeId:%d, failed update", pDnode->dnodeId);
+ }
}
static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
@@ -383,7 +385,7 @@ static int32_t mnodeCreateDnode(char *ep) {
pDnode->createdTime = taosGetTimestampMs();
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->totalVnodes = TSDB_INVALID_VNODE_NUM;
- strcpy(pDnode->dnodeEp, ep);
+ tstrncpy(pDnode->dnodeEp, ep, TSDB_EP_LEN);
taosGetFqdnPortFromEp(ep, pDnode->dnodeFqdn, &pDnode->dnodePort);
SSdbOper oper = {
diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c
index 57bb1b2bac..96dc700783 100644
--- a/src/mnode/src/mnodeMain.c
+++ b/src/mnode/src/mnodeMain.c
@@ -134,8 +134,12 @@ void mnodeStopSystem() {
}
mnodeCleanupSystem();
- mPrint("mnode file is removed");
- remove(tsMnodeDir);
+
+ if (remove(tsMnodeDir) != 0) {
+ mPrint("failed to remove mnode file, reason:%s", strerror(errno));
+ } else {
+ mPrint("mnode file is removed");
+ }
}
static void mnodeInitTimer() {
diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c
index ed0e717c43..4720fb0ddc 100644
--- a/src/mnode/src/mnodeProfile.c
+++ b/src/mnode/src/mnodeProfile.c
@@ -67,7 +67,7 @@ int32_t mnodeInitProfile() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_STREAM, mnodeProcessKillStreamMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_CONN, mnodeProcessKillConnectionMsg);
- tsMnodeConnCache = taosCacheInitWithCb(tsMnodeTmr, CONN_CHECK_TIME, mnodeFreeConn);
+ tsMnodeConnCache = taosCacheInitWithCb(CONN_CHECK_TIME, mnodeFreeConn);
return 0;
}
@@ -97,7 +97,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
.connId = connId,
.stime = taosGetTimestampMs()
};
- strcpy(connObj.user, user);
+ tstrncpy(connObj.user, user, sizeof(connObj.user));
char key[10];
sprintf(key, "%u", connId);
@@ -222,7 +222,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi
SConnObj *pConnObj = NULL;
int32_t cols = 0;
char * pWrite;
- char ipStr[TSDB_IPv4ADDR_LEN + 7];
+ char ipStr[TSDB_IPv4ADDR_LEN + 6];
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
@@ -235,12 +235,14 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pConnObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- snprintf(ipStr, TSDB_IPv4ADDR_LEN + 6, "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, TSDB_IPv4ADDR_LEN + 6);
+ snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
+ size = sizeof(ipStr);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -342,7 +344,7 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
SConnObj *pConnObj = NULL;
int32_t cols = 0;
char * pWrite;
- char ipStr[TSDB_IPv4ADDR_LEN + 7];
+ char ipStr[TSDB_IPv4ADDR_LEN + 6];
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
@@ -358,12 +360,14 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pConnObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- snprintf(ipStr, TSDB_IPv4ADDR_LEN + 6, "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, TSDB_IPv4ADDR_LEN + 6);
+ snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
+ size = sizeof(ipStr);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -463,7 +467,7 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v
SConnObj *pConnObj = NULL;
int32_t cols = 0;
char * pWrite;
- char ipStr[TSDB_IPv4ADDR_LEN + 7];
+ char ipStr[TSDB_IPv4ADDR_LEN + 6];
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
@@ -479,12 +483,14 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pConnObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- snprintf(ipStr, TSDB_IPv4ADDR_LEN + 6, "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, TSDB_IPv4ADDR_LEN + 6);
+ snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
+ size = sizeof(ipStr);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index b9033cdf3c..659ac159a8 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -30,6 +30,8 @@
#include "mnodeDnode.h"
#include "mnodeSdb.h"
+#define SDB_TABLE_LEN 12
+
typedef enum {
SDB_ACTION_INSERT,
SDB_ACTION_DELETE,
@@ -43,7 +45,7 @@ typedef enum {
} ESdbStatus;
typedef struct _SSdbTable {
- char tableName[TSDB_DB_NAME_LEN + 1];
+ char tableName[SDB_TABLE_LEN];
ESdbTable tableId;
ESdbKey keyType;
int32_t hashSessions;
@@ -174,10 +176,10 @@ static void sdbRestoreTables() {
totalRows += pTable->numOfRows;
numOfTables++;
- sdbTrace("table:%s, is restored, numOfRows:%d", pTable->tableName, pTable->numOfRows);
+ sdbTrace("table:%s, is restored, numOfRows:%" PRId64, pTable->tableName, pTable->numOfRows);
}
- sdbTrace("sdb is restored, version:%d totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables);
+ sdbTrace("sdb is restored, version:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables);
}
void sdbUpdateMnodeRoles() {
@@ -449,7 +451,7 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
pthread_mutex_unlock(&pTable->mutex);
- sdbTrace("table:%s, insert record:%s to hash, rowSize:%d vnumOfRows:%d version:%" PRIu64, pTable->tableName,
+ sdbTrace("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion());
(*pTable->insertFp)(pOper);
@@ -473,7 +475,7 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
pTable->numOfRows--;
pthread_mutex_unlock(&pTable->mutex);
- sdbTrace("table:%s, delete record:%s from hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
+ sdbTrace("table:%s, delete record:%s from hash, numOfRows:%" PRId64 "version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
@@ -484,7 +486,7 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
}
static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) {
- sdbTrace("table:%s, update record:%s in hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
+ sdbTrace("table:%s, update record:%s in hash, numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
(*pTable->updateFp)(pOper);
@@ -717,7 +719,7 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) {
if (pTable == NULL) return NULL;
- strcpy(pTable->tableName, pDesc->tableName);
+ tstrncpy(pTable->tableName, pDesc->tableName, SDB_TABLE_LEN);
pTable->keyType = pDesc->keyType;
pTable->tableId = pDesc->tableId;
pTable->hashSessions = pDesc->hashSessions;
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 30f491ec03..0ff4da42c5 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -65,7 +65,7 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
- tsMnodeShowCache = taosCacheInitWithCb(tsMnodeTmr, 10, mnodeFreeShowObj);
+ tsMnodeShowCache = taosCacheInitWithCb(10, mnodeFreeShowObj);
return 0;
}
@@ -138,15 +138,16 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
}
pShowRsp->qhandle = htobe64((uint64_t) pShow);
- mTrace("%p, show type:%s, start to get meta", pShow, mnodeGetShowType(pShowMsg->type));
int32_t code = (*tsMnodeShowMetaFp[pShowMsg->type])(&pShowRsp->tableMeta, pShow, pMsg->rpcMsg.handle);
- if (code == 0) {
+ mTrace("%p, show type:%s index:%d, get meta finished, rows:%d cols:%d result:%s", pShow,
+ mnodeGetShowType(pShowMsg->type), pShow->index, pShow->numOfRows, pShow->numOfColumns, tstrerror(code));
+
+ if (code == TSDB_CODE_SUCCESS) {
pMsg->rpcRsp.rsp = pShowRsp;
pMsg->rpcRsp.len = sizeof(SCMShowRsp) + sizeof(SSchema) * pShow->numOfColumns;
mnodeReleaseShowObj(pShow, false);
return TSDB_CODE_SUCCESS;
} else {
- mError("%p, show type:%s, failed to get meta, reason:%s", pShow, mnodeGetShowType(pShowMsg->type), tstrerror(code));
rpcFreeCont(pShowRsp);
mnodeReleaseShowObj(pShow, true);
return code;
@@ -161,8 +162,7 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
pRetrieve->qhandle = htobe64(pRetrieve->qhandle);
SShowObj *pShow = (SShowObj *)pRetrieve->qhandle;
- mTrace("%p, show type:%s, retrieve data", pShow, mnodeGetShowType(pShow->type));
-
+
/*
* in case of server restart, apps may hold qhandle created by server before
* restart, which is actually invalid, therefore, signature check is required.
@@ -171,7 +171,10 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
mError("%p, show is invalid", pShow);
return TSDB_CODE_MND_INVALID_SHOWOBJ;
}
-
+
+ mTrace("%p, show type:%s index:%d, start retrieve data, numOfReads:%d numOfRows:%d", pShow,
+ mnodeGetShowType(pShow->type), pShow->index, pShow->numOfReads, pShow->numOfRows);
+
if (mnodeCheckShowFinished(pShow)) {
mTrace("%p, show is already read finished, numOfReads:%d numOfRows:%d", pShow, pShow->numOfReads, pShow->numOfRows);
pShow->numOfReads = pShow->numOfRows;
@@ -198,6 +201,9 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE)
rowsRead = (*tsMnodeShowRetrieveFp[pShow->type])(pShow, pRsp->data, rowsToRead, pMsg->rpcMsg.handle);
+ mTrace("%p, show type:%s index:%d, stop retrieve data, rowsRead:%d rowsToRead:%d", pShow,
+ mnodeGetShowType(pShow->type), pShow->index, rowsRead, rowsToRead);
+
if (rowsRead < 0) {
rpcFreeCont(pRsp);
mnodeReleaseShowObj(pShow, false);
@@ -211,7 +217,7 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
pMsg->rpcRsp.rsp = pRsp;
pMsg->rpcRsp.len = size;
- if (rowsToRead == 0 || (rowsRead == rowsToRead && pShow->numOfRows - pShow->numOfReads == rowsToRead)) {
+ if (rowsToRead == 0 || (rowsRead == rowsToRead && pShow->numOfRows == pShow->numOfReads)) {
pRsp->completed = 1;
mnodeReleaseShowObj(pShow, true);
} else {
@@ -324,6 +330,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
connect_over:
if (code != TSDB_CODE_SUCCESS) {
+ rpcFreeCont(pConnectRsp);
mLError("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code));
} else {
mLPrint("user:%s login from %s, result:%s", connInfo.user, taosIpStr(connInfo.clientIp), tstrerror(code));
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 119ba1627d..0ff198bf8f 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -157,10 +157,12 @@ static int32_t mnodeChildTableActionDelete(SSdbOper *pOper) {
if (pDb != NULL) pAcct = mnodeGetAcct(pDb->acct);
if (pTable->info.type == TSDB_CHILD_TABLE) {
- grantRestore(TSDB_GRANT_TIMESERIES, pTable->superTable->numOfColumns - 1);
- if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
- mnodeRemoveTableFromStable(pTable->superTable, pTable);
- mnodeDecTableRef(pTable->superTable);
+ if (pTable->superTable) {
+ grantRestore(TSDB_GRANT_TIMESERIES, pTable->superTable->numOfColumns - 1);
+ if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
+ mnodeRemoveTableFromStable(pTable->superTable, pTable);
+ mnodeDecTableRef(pTable->superTable);
+ }
} else {
grantRestore(TSDB_GRANT_TIMESERIES, pTable->numOfColumns - 1);
if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
@@ -201,7 +203,7 @@ static int32_t mnodeChildTableActionEncode(SSdbOper *pOper) {
assert(pTable != NULL && pOper->rowData != NULL);
int32_t len = strlen(pTable->info.tableId);
- if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID;
+ if (len >= TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID;
memcpy(pOper->rowData, pTable->info.tableId, len);
memset(pOper->rowData + len, 0, 1);
@@ -232,7 +234,7 @@ static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) {
if (pTable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
int32_t len = strlen(pOper->rowData);
- if (len > TSDB_TABLE_ID_LEN) {
+ if (len >= TSDB_TABLE_ID_LEN) {
free(pTable);
return TSDB_CODE_MND_INVALID_TABLE_ID;
}
@@ -453,7 +455,7 @@ static int32_t mnodeSuperTableActionEncode(SSdbOper *pOper) {
assert(pOper->pObj != NULL && pOper->rowData != NULL);
int32_t len = strlen(pStable->info.tableId);
- if (len > TSDB_TABLE_ID_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID;
+ if (len >= TSDB_TABLE_ID_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID;
memcpy(pOper->rowData, pStable->info.tableId, len);
memset(pOper->rowData + len, 0, 1);
@@ -477,7 +479,7 @@ static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) {
if (pStable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
int32_t len = strlen(pOper->rowData);
- if (len > TSDB_TABLE_ID_LEN){
+ if (len >= TSDB_TABLE_ID_LEN){
free(pStable);
return TSDB_CODE_MND_INVALID_TABLE_ID;
}
@@ -951,7 +953,7 @@ static int32_t mnodeModifySuperTableTagName(SSuperTableObj *pStable, char *oldTa
// update
SSchema *schema = (SSchema *) (pStable->schema + pStable->numOfColumns + col);
- strncpy(schema->name, newTagName, TSDB_COL_NAME_LEN);
+ tstrncpy(schema->name, newTagName, sizeof(schema->name));
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
@@ -1004,7 +1006,7 @@ static int32_t mnodeAddSuperTableColumn(SDbObj *pDb, SSuperTableObj *pStable, SS
sizeof(SSchema) * pStable->numOfTags);
memcpy(pStable->schema + pStable->numOfColumns, schema, sizeof(SSchema) * ncols);
- SSchema *tschema = (SSchema *) (pStable->schema + sizeof(SSchema) * pStable->numOfColumns);
+ SSchema *tschema = (SSchema *) (pStable->schema + pStable->numOfColumns);
for (int32_t i = 0; i < ncols; i++) {
tschema[i].colId = pStable->nextColId++;
}
@@ -1078,8 +1080,9 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
- pShow->bytes[cols] = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema tbnameSchema = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = tbnameSchema.bytes;
+ pSchema[cols].type = tbnameSchema.type;
strcpy(pSchema[cols].name, "name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -1138,7 +1141,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
prefixLen = strlen(prefix);
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
- char stableName[TSDB_TABLE_NAME_LEN + 1] = {0};
+ char stableName[TSDB_TABLE_NAME_LEN] = {0};
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextSuperTable(pShow->pIter, &pTable);
@@ -1151,7 +1154,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
memset(stableName, 0, tListLen(stableName));
mnodeExtractTableName(pTable->info.tableId, stableName);
- if (pShow->payloadLen > 0 && patternMatch(pShow->payload, stableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH) {
+ if (pShow->payloadLen > 0 && patternMatch(pShow->payload, stableName, sizeof(stableName) - 1, &info) != TSDB_PATTERN_MATCH) {
mnodeDecTableRef(pTable);
continue;
}
@@ -1160,7 +1163,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- int16_t len = strnlen(stableName, TSDB_DB_NAME_LEN);
+ int16_t len = strnlen(stableName, TSDB_DB_NAME_LEN - 1);
*(int16_t*) pWrite = len;
pWrite += sizeof(int16_t); // todo refactor
@@ -1228,7 +1231,7 @@ static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pT
assert(numOfCols <= TSDB_MAX_COLUMNS);
for (int32_t i = 0; i < numOfCols; ++i) {
- strncpy(pSchema->name, pTable->schema[i].name, TSDB_COL_NAME_LEN);
+ tstrncpy(pSchema->name, pTable->schema[i].name, sizeof(pSchema->name));
pSchema->type = pTable->schema[i].type;
pSchema->bytes = htons(pTable->schema[i].bytes);
pSchema->colId = htons(pTable->schema[i].colId);
@@ -1249,12 +1252,12 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns);
pMeta->tableType = pTable->info.type;
pMeta->contLen = sizeof(STableMetaMsg) + mnodeSetSchemaFromSuperTable(pMeta->schema, pTable);
- strncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_ID_LEN);
+ tstrncpy(pMeta->tableId, pTable->info.tableId, sizeof(pMeta->tableId));
+ pMsg->rpcRsp.len = pMeta->contLen;
pMeta->contLen = htons(pMeta->contLen);
pMsg->rpcRsp.rsp = pMeta;
- pMsg->rpcRsp.len = pMeta->contLen;
mTrace("stable:%s, uid:%" PRIu64 " table meta is retrieved", pTable->info.tableId, pTable->uid);
return TSDB_CODE_SUCCESS;
@@ -1769,8 +1772,9 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
- strncpy(pCreateMsg->tableId, pInfo->tableId, tListLen(pInfo->tableId));
- strcpy(pCreateMsg->db, pMsg->pDb->name);
+ size_t size = sizeof(pInfo->tableId);
+ tstrncpy(pCreateMsg->tableId, pInfo->tableId, size);
+ tstrncpy(pCreateMsg->db, pMsg->pDb->name, sizeof(pCreateMsg->db));
pCreateMsg->igExists = 1;
pCreateMsg->getMeta = 1;
pCreateMsg->contLen = htonl(contLen);
@@ -2032,7 +2036,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
pMultiMeta->numOfTables = 0;
for (int32_t t = 0; t < pInfo->numOfTables; ++t) {
- char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN + 1);
+ char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN);
SChildTableObj *pTable = mnodeGetChildTable(tableId);
if (pTable == NULL) continue;
@@ -2079,8 +2083,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
- pShow->bytes[cols] = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema s = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = s.bytes;
+ pSchema[cols].type = s.type;
strcpy(pSchema[cols].name, "table_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -2097,8 +2102,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
- pShow->bytes[cols] = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema tbCol = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = tbCol.bytes;
+ pSchema[cols].type = tbCol.type;
strcpy(pSchema[cols].name, "stable_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -2141,12 +2147,12 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
continue;
}
- char tableName[TSDB_TABLE_NAME_LEN + 1] = {0};
+ char tableName[TSDB_TABLE_NAME_LEN] = {0};
// pattern compare for table name
mnodeExtractTableName(pTable->info.tableId, tableName);
- if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH) {
+ if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) {
mnodeDecTableRef(pTable);
continue;
}
@@ -2155,7 +2161,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
char *pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, TSDB_TABLE_NAME_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, sizeof(tableName) - 1);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -2173,10 +2179,10 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- memset(tableName, 0, tListLen(tableName));
+ memset(tableName, 0, sizeof(tableName));
if (pTable->info.type == TSDB_CHILD_TABLE) {
mnodeExtractTableName(pTable->superTable->info.tableId, tableName);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, TSDB_TABLE_NAME_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, sizeof(tableName) - 1);
}
cols++;
@@ -2268,8 +2274,9 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
- pShow->bytes[cols] = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema tbnameColSchema = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = tbnameColSchema.bytes;
+ pSchema[cols].type = tbnameColSchema.type;
strcpy(pSchema[cols].name, "table_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -2331,12 +2338,12 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
continue;
}
- char tableName[TSDB_TABLE_NAME_LEN + 1] = {0};
+ char tableName[TSDB_TABLE_NAME_LEN] = {0};
// pattern compare for table name
mnodeExtractTableName(pTable->info.tableId, tableName);
- if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH) {
+ if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) {
mnodeDecTableRef(pTable);
continue;
}
@@ -2345,7 +2352,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
char *pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, TSDB_TABLE_NAME_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, tableName, sizeof(tableName) - 1);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c
index aab0847a6b..95457c83a0 100644
--- a/src/mnode/src/mnodeUser.c
+++ b/src/mnode/src/mnodeUser.c
@@ -315,7 +315,8 @@ static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, voi
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->user, TSDB_USER_LEN);
+ size_t size = sizeof(pUser->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -336,7 +337,7 @@ static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, voi
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->acct, TSDB_USER_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->acct, sizeof(pUser->acct));
cols++;
numOfRows++;
diff --git a/src/os/darwin/src/darwinPlatform.c b/src/os/darwin/src/darwinPlatform.c
index 05290c43f4..cae65a2927 100644
--- a/src/os/darwin/src/darwinPlatform.c
+++ b/src/os/darwin/src/darwinPlatform.c
@@ -189,7 +189,7 @@ void taosGetSystemLocale() {
if (cfg_locale && cfg_locale->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) {
char *locale = setlocale(LC_CTYPE, "chs");
if (locale != NULL) {
- strncpy(tsLocale, locale, sizeof(tsLocale) / sizeof(tsLocale[0]));
+ tstrncpy(tsLocale, locale, sizeof(tsLocale));
cfg_locale->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
uPrint("locale not configured, set to default:%s", tsLocale);
}
diff --git a/src/os/linux/src/linuxSysPara.c b/src/os/linux/src/linuxSysPara.c
index b270eb14cf..0e76ab0046 100644
--- a/src/os/linux/src/linuxSysPara.c
+++ b/src/os/linux/src/linuxSysPara.c
@@ -160,7 +160,7 @@ static void taosGetSystemTimezone() {
/* load time zone string from /etc/timezone */
FILE *f = fopen("/etc/timezone", "r");
- char buf[64] = {0};
+ char buf[65] = {0};
if (f != NULL) {
fread(buf, 64, 1, f);
fclose(f);
@@ -229,7 +229,7 @@ static void taosGetSystemLocale() { // get and set default locale
uError("can't get locale from system, set it to en_US.UTF-8");
strcpy(tsLocale, "en_US.UTF-8");
} else {
- tstrncpy(tsLocale, locale, tListLen(tsLocale));
+ tstrncpy(tsLocale, locale, sizeof(tsLocale));
uError("locale not configured, set to system default:%s", tsLocale);
}
}
@@ -242,7 +242,7 @@ static void taosGetSystemLocale() { // get and set default locale
str++;
char *revisedCharset = taosCharsetReplace(str);
- strncpy(tsCharset, revisedCharset, tListLen(tsCharset));
+ tstrncpy(tsCharset, revisedCharset, sizeof(tsCharset));
free(revisedCharset);
uWarn("charset not configured, set to system default:%s", tsCharset);
@@ -332,33 +332,42 @@ bool taosGetDisk() {
}
static bool taosGetCardInfo(int64_t *bytes) {
+ *bytes = 0;
FILE *fp = fopen(tsSysNetFile, "r");
if (fp == NULL) {
uError("open file:%s failed", tsSysNetFile);
return false;
}
- int64_t rbytes, rpackts, tbytes, tpackets;
- int64_t nouse1, nouse2, nouse3, nouse4, nouse5, nouse6;
- char nouse0[200] = {0};
- size_t len;
- char * line = NULL;
- *bytes = 0;
+ size_t len = 2048;
+ char * line = calloc(1, len);
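+ // a fixed 2048-byte line buffer is allocated once here and re-zeroed for each line read in the loop below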
while (!feof(fp)) {
- tfree(line);
- len = 0;
+ memset(line, 0, len);
+
+ int64_t rbytes = 0;
+ int64_t rpackts = 0;
+ int64_t tbytes = 0;
+ int64_t tpackets = 0;
+ int64_t nouse1 = 0;
+ int64_t nouse2 = 0;
+ int64_t nouse3 = 0;
+ int64_t nouse4 = 0;
+ int64_t nouse5 = 0;
+ int64_t nouse6 = 0;
+ char nouse0[200] = {0};
+
getline(&line, &len, fp);
- if (line == NULL) {
- break;
- }
+ line[len - 1] = 0;
+
if (strstr(line, "lo:") != NULL) {
continue;
}
sscanf(line,
- "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64,
+ "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64
+ " %" PRId64,
nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &tbytes, &tpackets);
*bytes += (rbytes + tbytes);
}
diff --git a/src/os/windows/src/twindows.c b/src/os/windows/src/twindows.c
index 76b354f784..82bcc69b11 100644
--- a/src/os/windows/src/twindows.c
+++ b/src/os/windows/src/twindows.c
@@ -226,7 +226,7 @@ void taosGetSystemLocale() {
if (cfg_locale && cfg_locale->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) {
char *locale = setlocale(LC_CTYPE, "chs");
if (locale != NULL) {
- strncpy(tsLocale, locale, sizeof(tsLocale) / sizeof(tsLocale[0]));
+ tstrncpy(tsLocale, locale, sizeof(tsLocale));
cfg_locale->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
uPrint("locale not configured, set to default:%s", tsLocale);
}
diff --git a/src/plugins/http/inc/httpHandle.h b/src/plugins/http/inc/httpHandle.h
index 9be2796a96..b888543137 100644
--- a/src/plugins/http/inc/httpHandle.h
+++ b/src/plugins/http/inc/httpHandle.h
@@ -67,7 +67,7 @@
#define HTTP_COMPRESS_IDENTITY 0
#define HTTP_COMPRESS_GZIP 2
-#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN * 2 + 1)
+#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + TSDB_PASSWORD_LEN)
typedef enum {
HTTP_CONTEXT_STATE_READY,
@@ -84,7 +84,7 @@ typedef struct {
int expire;
int access;
void *taos;
- char id[HTTP_SESSION_ID_LEN + 1];
+ char id[HTTP_SESSION_ID_LEN];
} HttpSession;
typedef enum {
diff --git a/src/plugins/http/src/gcHandle.c b/src/plugins/http/src/gcHandle.c
index 4120980123..176e16301b 100644
--- a/src/plugins/http/src/gcHandle.c
+++ b/src/plugins/http/src/gcHandle.c
@@ -48,7 +48,7 @@ void gcInitHandle(HttpServer* pServer) { httpAddMethod(pServer, &gcDecodeMethod)
bool gcGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[GC_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[GC_USER_URL_POS].len <= 0) {
+ if (pParser->path[GC_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[GC_USER_URL_POS].len <= 0) {
return false;
}
@@ -58,7 +58,7 @@ bool gcGetUserFromUrl(HttpContext* pContext) {
bool gcGetPassFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[GC_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[GC_PASS_URL_POS].len <= 0) {
+ if (pParser->path[GC_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[GC_PASS_URL_POS].len <= 0) {
return false;
}
diff --git a/src/plugins/http/src/httpAuth.c b/src/plugins/http/src/httpAuth.c
index 594f51e275..cf2ce5ddd9 100644
--- a/src/plugins/http/src/httpAuth.c
+++ b/src/plugins/http/src/httpAuth.c
@@ -88,8 +88,8 @@ bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len) {
free(base64);
return false;
} else {
- strncpy(pContext->user, descrypt, TSDB_USER_LEN);
- strncpy(pContext->pass, descrypt + TSDB_USER_LEN, TSDB_PASSWORD_LEN);
+ tstrncpy(pContext->user, descrypt, sizeof(pContext->user));
+ tstrncpy(pContext->pass, descrypt + TSDB_USER_LEN, sizeof(pContext->pass));
httpTrace("context:%p, fd:%d, ip:%s, taosd token:%s parsed success, user:%s", pContext, pContext->fd,
pContext->ipstr, token, pContext->user);
@@ -100,14 +100,17 @@ bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len) {
}
bool httpGenTaosdAuthToken(HttpContext *pContext, char *token, int maxLen) {
- char buffer[TSDB_USER_LEN + TSDB_PASSWORD_LEN] = {0};
- strncpy(buffer, pContext->user, TSDB_USER_LEN);
- strncpy(buffer + TSDB_USER_LEN, pContext->pass, TSDB_PASSWORD_LEN);
+ char buffer[sizeof(pContext->user) + sizeof(pContext->pass)] = {0};
+ size_t size = sizeof(pContext->user);
+ tstrncpy(buffer, pContext->user, size);
+ size = sizeof(pContext->pass);
+ tstrncpy(buffer + sizeof(pContext->user), pContext->pass, size);
char *encrypt = taosDesEncode(KEY_DES_4, buffer, TSDB_USER_LEN + TSDB_PASSWORD_LEN);
char *base64 = base64_encode((const unsigned char *)encrypt, TSDB_USER_LEN + TSDB_PASSWORD_LEN);
- strncpy(token, base64, (size_t)strlen(base64));
+ size_t len = strlen(base64);
+ tstrncpy(token, base64, len + 1);
free(encrypt);
free(base64);
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index 6ff93b3e8a..efd1aac767 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -87,7 +87,7 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
}
if (code < 0) {
- if (encode->checkFinishedFp != NULL && !encode->checkFinishedFp(pContext, singleCmd, -code)) {
+ if (encode->checkFinishedFp != NULL && !encode->checkFinishedFp(pContext, singleCmd, code)) {
singleCmd->code = code;
httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos jump to:%d, last code:%s, last sql:%s",
pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos + 1, tstrerror(code), sql);
diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c
index b91b89e21c..1fb63ea2fc 100644
--- a/src/plugins/http/src/httpUtil.c
+++ b/src/plugins/http/src/httpUtil.c
@@ -307,7 +307,7 @@ void httpTrimTableName(char *name) {
for (int i = 0; name[i] != 0; i++) {
if (name[i] == ' ' || name[i] == ':' || name[i] == '.' || name[i] == '-' || name[i] == '/' || name[i] == '\'')
name[i] = '_';
- if (i == TSDB_TABLE_NAME_LEN + 1) {
+ if (i == TSDB_TABLE_NAME_LEN) {
name[i] = 0;
break;
}
@@ -323,7 +323,7 @@ int httpShrinkTableName(HttpContext *pContext, int pos, char *name) {
len++;
}
- if (len < TSDB_TABLE_NAME_LEN) {
+ if (len < TSDB_TABLE_NAME_LEN - 1) {
return pos;
}
diff --git a/src/plugins/http/src/restHandle.c b/src/plugins/http/src/restHandle.c
index d481a654d8..93094fa287 100644
--- a/src/plugins/http/src/restHandle.c
+++ b/src/plugins/http/src/restHandle.c
@@ -61,7 +61,7 @@ void restInitHandle(HttpServer* pServer) {
bool restGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[REST_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[REST_USER_URL_POS].len <= 0) {
+ if (pParser->path[REST_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].len <= 0) {
return false;
}
@@ -71,7 +71,7 @@ bool restGetUserFromUrl(HttpContext* pContext) {
bool restGetPassFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[REST_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[REST_PASS_URL_POS].len <= 0) {
+ if (pParser->path[REST_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[REST_PASS_URL_POS].len <= 0) {
return false;
}
diff --git a/src/plugins/http/src/tgHandle.c b/src/plugins/http/src/tgHandle.c
index c6a2230bfb..b85f27d175 100644
--- a/src/plugins/http/src/tgHandle.c
+++ b/src/plugins/http/src/tgHandle.c
@@ -209,7 +209,7 @@ void tgParseSchemaMetric(cJSON *metric) {
goto ParseEnd;
}
int nameLen = (int)strlen(field->valuestring);
- if (nameLen == 0 || nameLen > TSDB_TABLE_NAME_LEN) {
+ if (nameLen == 0 || nameLen >= TSDB_TABLE_NAME_LEN) {
parsedOk = false;
goto ParseEnd;
}
@@ -306,21 +306,21 @@ void tgCleanupHandle() {
bool tgGetUserFromUrl(HttpContext *pContext) {
HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[TG_USER_URL_POS].len <= 0) {
+ if (pParser->path[TG_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[TG_USER_URL_POS].len <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[TG_USER_URL_POS].pos, TSDB_USER_LEN);
+ tstrncpy(pContext->user, pParser->path[TG_USER_URL_POS].pos, sizeof(pContext->user));
return true;
}
bool tgGetPassFromUrl(HttpContext *pContext) {
HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[TG_PASS_URL_POS].len <= 0) {
+ if (pParser->path[TG_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[TG_PASS_URL_POS].len <= 0) {
return false;
}
- tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos, TSDB_PASSWORD_LEN);
+ tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos, sizeof(pContext->pass));
return true;
}
@@ -409,7 +409,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL);
return false;
}
- if (nameLen >= TSDB_TABLE_NAME_LEN - 7) {
+ if (nameLen >= TSDB_TABLE_NAME_LEN - 8) {
httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_LONG);
return false;
}
@@ -498,7 +498,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
return false;
}
- if (strlen(host->valuestring) >= TSDB_TABLE_NAME_LEN) {
+ if (strlen(host->valuestring) >= TSDB_TABLE_NAME_LEN - 1) {
httpSendErrorResp(pContext, HTTP_TG_TABLE_SIZE);
return false;
}
diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c
index 01e276da64..735c77ae21 100644
--- a/src/plugins/monitor/src/monitorMain.c
+++ b/src/plugins/monitor/src/monitorMain.c
@@ -156,7 +156,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
", io_read float, io_write float"
", req_http int, req_select int, req_insert int"
") tags (dnodeid int, fqdn binary(%d))",
- tsMonitorDbName, TSDB_FQDN_LEN + 1);
+ tsMonitorDbName, TSDB_FQDN_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_DN) {
snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn%d using %s.dn tags(%d, '%s')", tsMonitorDbName,
dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp);
@@ -175,7 +175,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
", totalConns smallint, maxConns smallint"
", accessState smallint"
") tags (acctId binary(%d))",
- tsMonitorDbName, TSDB_USER_LEN + 1);
+ tsMonitorDbName, TSDB_USER_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_ACCT_ROOT) {
snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct_%s using %s.acct tags('%s')", tsMonitorDbName, "root",
tsMonitorDbName, "root");
@@ -183,7 +183,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.slowquery(ts timestamp, username "
"binary(%d), created_time timestamp, time bigint, sql binary(%d))",
- tsMonitorDbName, TSDB_TABLE_ID_LEN, TSDB_SLOW_QUERY_SQL_LEN);
+ tsMonitorDbName, TSDB_TABLE_ID_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_LOG) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.log(ts timestamp, level tinyint, "
diff --git a/src/query/inc/qfill.h b/src/query/inc/qfill.h
index 9ea9c8f7cf..da1cd8e5de 100644
--- a/src/query/inc/qfill.h
+++ b/src/query/inc/qfill.h
@@ -50,7 +50,8 @@ typedef struct SFillInfo {
char * nextValues; // next row of data
char** pData; // original result data block involved in filling data
int32_t capacityInRows; // data buffer size in rows
-
+ int8_t slidingUnit; // sliding time unit
+ int8_t precision; // time resolution
SFillColInfo* pFillCol; // column info for fill operations
} SFillInfo;
@@ -61,12 +62,13 @@ typedef struct SPoint {
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision);
-SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity,
- int32_t numOfCols, int64_t slidingTime, int32_t fillType, SFillColInfo* pFillCol);
+SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
+ int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
+ SFillColInfo* pFillCol);
void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp);
-void taosDestoryFillInfo(SFillInfo *pFillInfo);
+void* taosDestoryFillInfo(SFillInfo *pFillInfo);
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
@@ -74,9 +76,7 @@ void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput)
void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput);
-TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision);
-
-int64_t taosGetNumOfResultWithFill(SFillInfo* pFillInfo, int32_t numOfRows, int64_t ekey, int32_t maxNumOfRows);
+int64_t getFilledNumOfRes(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);
int32_t taosNumOfRemainRows(SFillInfo *pFillInfo);
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index aa8e83da38..340f6bc4f3 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -112,7 +112,7 @@ enum {
#define QUERY_IS_STABLE_QUERY(type) (((type)&TSDB_QUERY_TYPE_STABLE_QUERY) != 0)
#define QUERY_IS_JOIN_QUERY(type) (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_JOIN_QUERY))
-#define QUERY_IS_PROJECTION_QUERY(type) (((type)&TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0)
+#define QUERY_IS_PROJECTION_QUERY(type) (((type)&TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0)
#define QUERY_IS_FREE_RESOURCE(type) (((type)&TSDB_QUERY_TYPE_FREE_RESOURCE) != 0)
typedef struct SArithmeticSupport {
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 26502a7408..a1af147b62 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -12,8 +12,8 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "qfill.h"
#include "os.h"
+#include "qfill.h"
#include "hash.h"
#include "hashfunc.h"
@@ -354,7 +354,7 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
int16_t bytes) {
SQuery *pQuery = pRuntimeEnv->pQuery;
- int32_t *p1 = (int32_t *)taosHashGet(pWindowResInfo->hashList, pData, bytes);
+ int32_t *p1 = (int32_t *) taosHashGet(pWindowResInfo->hashList, pData, bytes);
if (p1 != NULL) {
pWindowResInfo->curIndex = *p1;
} else { // more than the capacity, reallocate the resources
@@ -484,7 +484,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
// set time window for current result
pWindowRes->window = *win;
-
+
setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes);
return TSDB_CODE_SUCCESS;
}
@@ -685,14 +685,14 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
SDataBlockInfo *pDataBlockInfo, TSKEY *primaryKeys,
__block_search_fn_t searchFn) {
SQuery *pQuery = pRuntimeEnv->pQuery;
-
+
// tumbling time window query, a special case of sliding time window query
if (pQuery->slidingTime == pQuery->intervalTime) {
// todo opt
}
-
+
getNextTimeWindow(pQuery, pNextWin);
-
+
// next time window is not in current block
if ((pNextWin->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(pNextWin->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) {
@@ -720,7 +720,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNextWin->ekey) {
TSKEY next = primaryKeys[startPos];
-
+
pNextWin->ekey += ((next - pNextWin->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->skey = pNextWin->ekey - pQuery->intervalTime + 1;
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNextWin->skey) {
@@ -729,7 +729,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
pNextWin->skey -= ((pNextWin->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->ekey = pNextWin->skey + pQuery->intervalTime - 1;
}
-
+
return startPos;
}
@@ -919,12 +919,25 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
+ int64_t v = -1;
+ // extract the group-by column value as an int64 key; a new result buffer page is added below if none is assigned yet
+ switch(type) {
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_TINYINT: v = GET_INT8_VAL(pData); break;
+ case TSDB_DATA_TYPE_SMALLINT: v = GET_INT16_VAL(pData); break;
+ case TSDB_DATA_TYPE_INT: v = GET_INT32_VAL(pData); break;
+ case TSDB_DATA_TYPE_BIGINT: v = GET_INT64_VAL(pData); break;
+ }
+
+// assert(pRuntimeEnv->windowResInfo.hashList->size <= 2);
SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes);
if (pWindowRes == NULL) {
return -1;
}
- // not assign result buffer yet, add new result buffer
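+ // tag the result window with the extracted group-by value, so each distinct value maps to its own window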
+ pWindowRes->window.skey = v;
+ pWindowRes->window.ekey = v;
+
if (pWindowRes->pos.pageId == -1) {
int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, GROUPRESULTID, pRuntimeEnv->numOfRowsPerPage);
if (ret != 0) {
@@ -1022,12 +1035,16 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
return false;
}
- if (functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST) {
- return !QUERY_IS_ASC_QUERY(pQuery);
- } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST) {
+ if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST) {
return QUERY_IS_ASC_QUERY(pQuery);
}
-
+
+ // LAST/LAST_DST is executed only when the order stored in param[0] matches the current query order
+ if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) {
+ return pCtx->param[0].i64Key == pQuery->order.order;
+// return !QUERY_IS_ASC_QUERY(pQuery);
+ }
+
// in the supplementary scan, only the following functions need to be executed
if (IS_REVERSE_SCAN(pRuntimeEnv)) {
return false;
@@ -1079,7 +1096,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
int32_t j = 0;
int32_t offset = -1;
-
+
for (j = 0; j < pDataBlockInfo->rows; ++j) {
offset = GET_COL_DATA_POS(pQuery, j, step);
@@ -1368,8 +1385,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
int32_t index = pSqlFuncMsg->colInfo.colIndex;
if (TSDB_COL_IS_TAG(pIndex->flag)) {
if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor
- pCtx->inputBytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- pCtx->inputType = TSDB_DATA_TYPE_BINARY;
+ SSchema s = tGetTableNameColumnSchema();
+
+ pCtx->inputBytes = s.bytes;
+ pCtx->inputType = s.type;
} else {
pCtx->inputBytes = pQuery->tagColList[index].bytes;
pCtx->inputType = pQuery->tagColList[index].type;
@@ -1466,7 +1485,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
tfree(pRuntimeEnv->pCtx);
}
- taosDestoryFillInfo(pRuntimeEnv->pFillInfo);
+ pRuntimeEnv->pFillInfo = taosDestoryFillInfo(pRuntimeEnv->pFillInfo);
destroyResultBuf(pRuntimeEnv->pResultBuf, pQInfo);
tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle);
@@ -1477,19 +1496,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
static bool isQueryKilled(SQInfo *pQInfo) {
return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED);
-#if 0
- /*
- * check if the queried meter is going to be deleted.
- * if it will be deleted soon, stop current query ASAP.
- */
- SMeterObj *pMeterObj = pQInfo->pObj;
- if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DROPPING)) {
- pQInfo->killed = 1;
- return true;
- }
-
- return (pQInfo->killed == 1);
-#endif
}
static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; }
@@ -1572,10 +1578,14 @@ static bool needReverseScan(SQuery *pQuery) {
continue;
}
- if (((functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) && QUERY_IS_ASC_QUERY(pQuery)) ||
- ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && !QUERY_IS_ASC_QUERY(pQuery))) {
+ if ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && !QUERY_IS_ASC_QUERY(pQuery)) {
return true;
}
+
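+ // for LAST/LAST_DST the expected order is carried in the function argument; a reverse scan is needed when it differs from the query order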
+ if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) {
+ int32_t order = pQuery->pSelectExpr[i].base.arg->argValue.i64;
+ return order != pQuery->order.order;
+ }
}
return false;
@@ -2028,6 +2038,34 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
return midPos;
}
+static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capacity) {
+ SQuery* pQuery = pRuntimeEnv->pQuery;
+
+ if (capacity < pQuery->rec.capacity) {
+ return;
+ }
+
+ for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+ int32_t bytes = pQuery->pSelectExpr[i].bytes;
+ assert(bytes > 0 && capacity > 0);
+
+ char *tmp = realloc(pQuery->sdata[i], bytes * capacity + sizeof(tFilePage));
+ if (tmp == NULL) { // todo handle the oom
+ assert(0);
+ } else {
+ pQuery->sdata[i] = (tFilePage *)tmp;
+ }
+
+ // set the pCtx output buffer position
+ pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data;
+ }
+
+ qTrace("QInfo:%p realloc output buffer to inc output buffer from: %d rows to:%d rows", GET_QINFO_ADDR(pRuntimeEnv),
+ pQuery->rec.capacity, capacity);
+
+ pQuery->rec.capacity = capacity;
+}
+
static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
// in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
SQuery* pQuery = pRuntimeEnv->pQuery;
@@ -2070,7 +2108,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* pTableQueryInfo = pQuery->current;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
-
+
qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d",
GET_QINFO_ADDR(pRuntimeEnv), pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, pTableQueryInfo->lastKey,
pQuery->order.order);
@@ -2111,7 +2149,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
-
+
// query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1;
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
@@ -2500,7 +2538,7 @@ int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
SResultInfo *pResultInfo = &pWindowRes->resultInfo[j];
assert(pResultInfo != NULL);
-
+
if (pResultInfo->numOfRes > 0) {
return pResultInfo->numOfRes;
}
@@ -2549,7 +2587,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
assert(pQInfo->numOfGroupResultPages == 0);
return 0;
} else if (numOfTables == 1) { // no need to merge results since only one table in each group
-
+
}
SCompSupporter cs = {pTableList, posList, pQInfo};
@@ -2638,7 +2676,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
#endif
qTrace("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt);
-
+
tfree(pTableList);
tfree(posList);
tfree(pTree);
@@ -2868,12 +2906,12 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
pRuntimeEnv->pCtx[j].currentStage = 0;
-
+
SResultInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]);
if (pResInfo->initialized) {
continue;
}
-
+
aAggs[functionId].init(&pRuntimeEnv->pCtx[j]);
}
}
@@ -2914,8 +2952,7 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) {
pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
}
-
-
+
updateNumOfResult(pRuntimeEnv, pQuery->rec.rows);
}
}
@@ -3052,7 +3089,7 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus
pQuery->window = pTableQueryInfo->win;
}
-void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
+void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
SQInfo *pQInfo = (SQInfo *) GET_QINFO_ADDR(pRuntimeEnv);
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo *pTableQueryInfo = pQuery->current;
@@ -3246,7 +3283,7 @@ void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
-
+
int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
@@ -3266,7 +3303,7 @@ void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult
void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
SQuery *pQuery = pRuntimeEnv->pQuery;
-
+
// Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
@@ -3275,21 +3312,21 @@ void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *
if (pCtx->resultInfo->complete) {
continue;
}
-
+
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
pCtx->currentStage = 0;
-
+
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
-
+
/*
* set the output buffer information and intermediate buffer
* not all queries require the interResultBuf, such as COUNT
*/
pCtx->resultInfo->superTableQ = pRuntimeEnv->stableQuery; // set super table query flag
-
+
if (!pCtx->resultInfo->initialized) {
aAggs[functionId].init(pCtx);
}
@@ -3494,18 +3531,32 @@ void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResult *result) {
assert(pQuery->rec.rows <= pQuery->rec.capacity);
}
-static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
+static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
SQuery *pQuery = pRuntimeEnv->pQuery;
// update the number of result for each, only update the number of rows for the corresponding window result.
if (pQuery->intervalTime == 0) {
- int32_t g = pTableQueryInfo->groupIndex;
- assert(pRuntimeEnv->windowResInfo.size > 0);
- SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g));
- if (pWindowRes->numOfRows == 0) {
- pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv);
+ for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) {
+ SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i];
+
+ for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
+ int32_t functionId = pRuntimeEnv->pCtx[j].functionId;
+ if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
+ continue;
+ }
+
+ pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
+ }
}
+
+// int32_t g = pTableQueryInfo->groupIndex;
+// assert(pRuntimeEnv->windowResInfo.size > 0);
+//
+// SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g));
+// if (pWindowRes->numOfRows == 0) {
+// pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv);
+// }
}
}
@@ -3517,7 +3568,7 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *
SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo;
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
- if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL) {
+ if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
} else {
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
@@ -3557,9 +3608,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) {
* first result row in the actual result set will fill nothing.
*/
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
- TSKEY ekey = taosGetRevisedEndKey(pQuery->window.ekey, pQuery->order.order, pQuery->slidingTime,
- pQuery->slidingTimeUnit, pQuery->precision);
- int32_t numOfTotal = taosGetNumOfResultWithFill(pFillInfo, remain, ekey, pQuery->rec.capacity);
+ int32_t numOfTotal = getFilledNumOfRes(pFillInfo, pQuery->window.ekey, pQuery->rec.capacity);
return numOfTotal > 0;
}
@@ -3601,7 +3650,7 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
}
}
-int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t numOfRows, int32_t *numOfInterpo) {
+int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t *numOfInterpo) {
SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
SQuery *pQuery = pRuntimeEnv->pQuery;
SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo;
@@ -4013,7 +4062,8 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) {
SFillColInfo* pColInfo = taosCreateFillColInfo(pQuery);
pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, 0, 0, pQuery->rec.capacity, pQuery->numOfOutput,
- pQuery->slidingTime, pQuery->fillType, pColInfo);
+ pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision,
+ pQuery->fillType, pColInfo);
}
// todo refactor
@@ -4080,21 +4130,22 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
- if (!isIntervalQuery(pQuery)) {
- int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
- setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIndex, blockInfo.window.ekey + step);
- } else { // interval query
- TSKEY nextKey = blockInfo.window.skey;
- setIntervalQueryRange(pQInfo, nextKey);
- /*int32_t ret = */setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
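+ // per-block context setup is skipped for group-by on normal columns, which presumably sets its output buffer per row (see setGroupResultOutputBuf)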
+ if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+ if (!isIntervalQuery(pQuery)) {
+ int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
+ setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIndex, blockInfo.window.ekey + step);
+ } else { // interval query
+ TSKEY nextKey = blockInfo.window.skey;
+ setIntervalQueryRange(pQInfo, nextKey);
+ /*int32_t ret = */setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
+ }
}
summary->totalRows += blockInfo.rows;
stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey);
qTrace("QInfo:%p check data block, uid:%"PRId64", tid:%d, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%" PRId64,
- GET_QINFO_ADDR(pRuntimeEnv), blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey,
- blockInfo.rows, pQuery->current->lastKey);
+ pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey);
}
int64_t et = taosGetTimestampMs();
@@ -4219,7 +4270,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
// here we simply set the first table as current table
pQuery->current = ((SGroupItem*) taosArrayGet(group, 0))->info;
- scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
+ scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
int64_t numOfRes = getNumOfResult(pRuntimeEnv);
if (numOfRes > 0) {
@@ -4232,10 +4283,84 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
// enable execution for next table, when handling the projection query
enableExecutionForNextTable(pRuntimeEnv);
+
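+ // stop scanning further tables once the output buffer is full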
+ if (pQuery->rec.rows >= pQuery->rec.capacity) {
+ setQueryStatus(pQuery, QUERY_RESBUF_FULL);
+ break;
+ }
+ }
+ } else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group-by on normal columns query
+ while (pQInfo->groupIndex < numOfGroups) {
+ SArray* group = taosArrayGetP(pQInfo->groupInfo.pGroupList, pQInfo->groupIndex);
+
+ qTrace("QInfo:%p group by normal columns group:%d, total group:%d", pQInfo, pQInfo->groupIndex, numOfGroups);
+
+ STsdbQueryCond cond = {
+ .twindow = pQuery->window,
+ .colList = pQuery->colList,
+ .order = pQuery->order.order,
+ .numOfCols = pQuery->numOfCols,
+ };
+
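+ // wrap a clone of the current group into a one-element group list; the query handle created below scans only these tables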
+ SArray *g1 = taosArrayInit(1, POINTER_BYTES);
+ SArray *tx = taosArrayClone(group);
+ taosArrayPush(g1, &tx);
+
+ STableGroupInfo gp = {.numOfTables = taosArrayGetSize(tx), .pGroupList = g1};
+
+ // release the previous query handle; a new one restricted to the current group is created below
+ if (pRuntimeEnv->pQueryHandle != NULL) {
+ tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle);
+ pRuntimeEnv->pQueryHandle = NULL;
+ }
+
+ pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
+
+ SArray* s = tsdbGetQueriedTableIdList(pRuntimeEnv->pQueryHandle);
+ assert(taosArrayGetSize(s) >= 1);
+
+ setTagVal(pRuntimeEnv, (STableId*) taosArrayGet(s, 0), pQInfo->tsdb);
+
+ // here we simply set the first table as current table
+ scanMultiTableDataBlocks(pQInfo);
+ pQInfo->groupIndex += 1;
+
+ SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+
+ // no results generated for current group, continue to try the next group
+ if (pWindowResInfo->size <= 0) {
+ continue;
+ }
+
+ for (int32_t i = 0; i < pWindowResInfo->size; ++i) {
+ SWindowStatus *pStatus = &pWindowResInfo->pResult[i].status;
+ pStatus->closed = true; // enable return all results for group by normal columns
+
+ SWindowResult *pResult = &pWindowResInfo->pResult[i];
+ for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
+ pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
+ }
+ }
+
+ qTrace("QInfo:%p generated groupby columns results %d rows for group %d completed", pQInfo, pWindowResInfo->size,
+ pQInfo->groupIndex);
+ int32_t currentGroupIndex = pQInfo->groupIndex;
+
+ pQuery->rec.rows = 0;
+ pQInfo->groupIndex = 0;
+
+ ensureOutputBufferSimple(pRuntimeEnv, pWindowResInfo->size);
+ copyFromWindowResToSData(pQInfo, pWindowResInfo->pResult);
+
+ pQInfo->groupIndex = currentGroupIndex; //restore the group index
+ assert(pQuery->rec.rows == pWindowResInfo->size);
+
+ clearClosedTimeWindow(pRuntimeEnv);
+ break;
}
} else {
/*
- * 1. super table projection query, 2. group-by on normal columns query, 3. ts-comp query
+ * 1. super table projection query, 2. ts-comp query
* if the subgroup index is larger than 0, results generated by group by tbname,k is existed.
* we need to return it to client in the first place.
*/
@@ -4282,7 +4407,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
}
}
- scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
+ scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
skipResults(pRuntimeEnv);
// the limitation of output result is reached, set the query completed
@@ -4348,25 +4473,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur;
}
- // todo refactor
- if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
- SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
-
- for (int32_t i = 0; i < pWindowResInfo->size; ++i) {
- SWindowStatus *pStatus = &pWindowResInfo->pResult[i].status;
- pStatus->closed = true; // enable return all results for group by normal columns
-
- SWindowResult *pResult = &pWindowResInfo->pResult[i];
- for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
- pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
- }
- }
-
- pQInfo->groupIndex = 0;
- pQuery->rec.rows = 0;
- copyFromWindowResToSData(pQInfo, pWindowResInfo->pResult);
- }
-
qTrace(
"QInfo %p numOfTables:%d, index:%d, numOfGroups:%d, %d points returned, total:%"PRId64", offset:%" PRId64,
pQInfo, pQInfo->groupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total,
@@ -4448,7 +4554,6 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
*/
if (isIntervalQuery(pQuery)) {
copyResToQueryResultBuf(pQInfo, pQuery);
-
#ifdef _DEBUG_VIEW
displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num);
#endif
@@ -4469,7 +4574,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
// query error occurred or query is killed, abort current execution
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) {
- qTrace("QInfo:%p query killed or error occurred, code:%d, abort", pQInfo, pQInfo->code);
+ qTrace("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code));
return;
}
@@ -4490,7 +4595,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
setQueryStatus(pQuery, QUERY_COMPLETED);
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) {
- qTrace("QInfo:%p query killed or error occurred, code:%d, abort", pQInfo, pQInfo->code);
+ qTrace("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code));
return;
}
@@ -4526,7 +4631,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
pQuery->current = pTableInfo; // set current query table info
- scanAllDataBlocks(pRuntimeEnv, pTableInfo->lastKey);
+ scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey);
finalizeQueryResult(pRuntimeEnv);
if (isQueryKilled(pQInfo)) {
@@ -4559,7 +4664,7 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
}
while (1) {
- scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
+ scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
finalizeQueryResult(pRuntimeEnv);
if (isQueryKilled(pQInfo)) {
@@ -4606,7 +4711,7 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start)
SQuery *pQuery = pRuntimeEnv->pQuery;
while (1) {
- scanAllDataBlocks(pRuntimeEnv, start);
+ scanOneTableDataBlocks(pRuntimeEnv, start);
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
return;
@@ -4666,13 +4771,11 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
limitResults(pRuntimeEnv);
break;
} else {
- TSKEY ekey = taosGetRevisedEndKey(pQuery->window.ekey, pQuery->order.order, pQuery->slidingTime,
- pQuery->slidingTimeUnit, pQuery->precision);
- taosFillSetStartInfo(pRuntimeEnv->pFillInfo, pQuery->rec.rows, ekey);
+ taosFillSetStartInfo(pRuntimeEnv->pFillInfo, pQuery->rec.rows, pQuery->window.ekey);
taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (tFilePage**) pQuery->sdata);
numOfInterpo = 0;
- pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, pQuery->rec.rows, &numOfInterpo);
+ pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfInterpo);
if (pQuery->rec.rows > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
limitResults(pRuntimeEnv);
break;
@@ -4704,8 +4807,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
* So, we do keep in this procedure instead of launching retrieve procedure for next results.
*/
int32_t numOfInterpo = 0;
- int32_t remain = taosNumOfRemainRows(pRuntimeEnv->pFillInfo);
- pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, remain, &numOfInterpo);
+ pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfInterpo);
if (pQuery->rec.rows > 0) {
limitResults(pRuntimeEnv);
@@ -4869,7 +4971,7 @@ static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pEx
(pFuncMsg->functionId == TSDB_FUNC_COUNT && pFuncMsg->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) {
continue;
}
-
+
return false;
}
}
@@ -5147,9 +5249,10 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo *
type = TSDB_DATA_TYPE_DOUBLE;
bytes = tDataTypeDesc[type].nSize;
} else if (pExprs[i].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX && pExprs[i].base.functionId == TSDB_FUNC_TAGPRJ) { // parse the normal column
- type = TSDB_DATA_TYPE_BINARY;
- bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- } else {
+ SSchema s = tGetTableNameColumnSchema();
+ type = s.type;
+ bytes = s.bytes;
+  } else {
int32_t j = getColumnIndexInSource(pQueryMsg, &pExprs[i].base, pTagCols);
assert(j < pQueryMsg->numOfCols || j < pQueryMsg->numOfTags || j == TSDB_TBNAME_COLUMN_INDEX);
@@ -5158,10 +5261,11 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo *
type = pCol->type;
bytes = pCol->bytes;
} else {
- type = TSDB_DATA_TYPE_BINARY;
- bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- }
+ SSchema s = tGetTableNameColumnSchema();
+ type = s.type;
+ bytes = s.bytes;
+ }
}
int32_t param = pExprs[i].base.arg[0].argValue.i64;
@@ -5242,7 +5346,7 @@ static int32_t createFilterInfo(void *pQInfo, SQuery *pQuery) {
if (pQuery->colList[i].numOfFilters > 0) {
SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[j];
- memcpy(&pFilterInfo->info, &pQuery->colList[i], sizeof(SColumnInfoData));
+ memcpy(&pFilterInfo->info, &pQuery->colList[i], sizeof(SColumnInfo));
pFilterInfo->info = pQuery->colList[i];
pFilterInfo->numOfFilters = pQuery->colList[i].numOfFilters;
@@ -5826,20 +5930,38 @@ _over:
 //pQInfo already freed in initQInfo, but *pQInfo may not point to null;
if (code != TSDB_CODE_SUCCESS) {
*pQInfo = NULL;
+ } else {
+ SQInfo* pq = (SQInfo*) (*pQInfo);
+
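+    // hold two references on the new handle: one for the caller that owns the
+    // returned qhandle and one for the query task itself; qDestroyQueryInfo drops
+    // a reference and frees the SQInfo only when the count reaches zero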
+ T_REF_INC(pq);
+ T_REF_INC(pq);
}
// if failed to add ref for all meters in this query, abort current query
return code;
}
-void qDestroyQueryInfo(qinfo_t pQInfo) {
+static void doDestroyQueryInfo(SQInfo* pQInfo) {
+ assert(pQInfo != NULL);
qTrace("QInfo:%p query completed", pQInfo);
-
- // print the query cost summary
- queryCostStatis(pQInfo);
+ queryCostStatis(pQInfo); // print the query cost summary
freeQInfo(pQInfo);
}
+void qDestroyQueryInfo(qinfo_t qHandle) {
+ SQInfo* pQInfo = (SQInfo*) qHandle;
+ if (!isValidQInfo(pQInfo)) {
+ return;
+ }
+
+ int16_t ref = T_REF_DEC(pQInfo);
+ qTrace("QInfo:%p dec refCount, value:%d", pQInfo, ref);
+
+ if (ref == 0) {
+    doDestroyQueryInfo(pQInfo);
+ }
+}
+
void qTableQuery(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
@@ -5850,6 +5972,7 @@ void qTableQuery(qinfo_t qinfo) {
if (isQueryKilled(pQInfo)) {
qTrace("QInfo:%p it is already killed, abort", pQInfo);
+ qDestroyQueryInfo(pQInfo);
return;
}
@@ -5865,7 +5988,7 @@ void qTableQuery(qinfo_t qinfo) {
}
sem_post(&pQInfo->dataReady);
- // vnodeDecRefCount(pQInfo);
+ qDestroyQueryInfo(pQInfo);
}
int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
@@ -5891,20 +6014,29 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
bool qHasMoreResultsToRetrieve(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
- if (pQInfo == NULL || pQInfo->signature != pQInfo || pQInfo->code != TSDB_CODE_SUCCESS) {
+ if (!isValidQInfo(pQInfo) || pQInfo->code != TSDB_CODE_SUCCESS) {
+ qTrace("QInfo:%p invalid qhandle or error occurs, abort query, code:%x", pQInfo, pQInfo->code);
return false;
}
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
+ bool ret = false;
if (Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) {
- return false;
+ ret = false;
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) {
- return true;
+ ret = true;
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
- return true;
+ ret = true;
} else {
assert(0);
}
+
+ if (ret) {
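+    // take one more reference so the handle is not destroyed before the client
+    // retrieves the remaining result set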
+ T_REF_INC(pQInfo);
+ qTrace("QInfo:%p has more results waits for client retrieve", pQInfo);
+ }
+
+ return ret;
}
int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen) {
@@ -5949,31 +6081,44 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
return code;
}
+int32_t qKillQuery(qinfo_t qinfo) {
+ SQInfo *pQInfo = (SQInfo *)qinfo;
+
+ if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
+ return TSDB_CODE_QRY_INVALID_QHANDLE;
+ }
+
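+  // mark the query as killed and release one reference; the SQInfo is freed
+  // when the last reference is dropped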
+ setQueryKilled(pQInfo);
+ qDestroyQueryInfo(pQInfo);
+
+ return TSDB_CODE_SUCCESS;
+}
+
static void buildTagQueryResult(SQInfo* pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
size_t numOfGroup = taosArrayGetSize(pQInfo->groupInfo.pGroupList);
assert(numOfGroup == 0 || numOfGroup == 1);
-
+
if (numOfGroup == 0) {
return;
}
SArray* pa = taosArrayGetP(pQInfo->groupInfo.pGroupList, 0);
-
+
size_t num = taosArrayGetSize(pa);
assert(num == pQInfo->groupInfo.numOfTables);
-
+
int32_t count = 0;
int32_t functionId = pQuery->pSelectExpr[0].base.functionId;
if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id
assert(pQuery->numOfOutput == 1);
-
+
SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
int32_t rsize = pExprInfo->bytes;
count = 0;
-
+
while(pQInfo->tableIndex < num && count < pQuery->rec.capacity) {
int32_t i = pQInfo->tableIndex++;
SGroupItem *item = taosArrayGet(pa, i);
@@ -6015,12 +6160,12 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
}
}
}
-
+
count += 1;
}
-
+
qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, count);
-
+
} else if (functionId == TSDB_FUNC_COUNT) {// handle the "count(tbname)" query
*(int64_t*) pQuery->sdata[0]->data = num;
@@ -6029,16 +6174,17 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
qTrace("QInfo:%p create count(tbname) query, res:%d rows:1", pQInfo, count);
} else { // return only the tags|table name etc.
count = 0;
+ SSchema tbnameSchema = tGetTableNameColumnSchema();
while(pQInfo->tableIndex < num && count < pQuery->rec.capacity) {
int32_t i = pQInfo->tableIndex++;
-
+
SExprInfo* pExprInfo = pQuery->pSelectExpr;
SGroupItem* item = taosArrayGet(pa, i);
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
char* data = tsdbGetTableName(pQInfo->tsdb, &item->id);
- char* dst = pQuery->sdata[j]->data + count * (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE);
+ char* dst = pQuery->sdata[j]->data + count * tbnameSchema.bytes;
memcpy(dst, data, varDataTLen(data));
} else {// todo refactor
int16_t type = pExprInfo[j].type;
@@ -6046,7 +6192,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
char* data = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, type, bytes);
char* dst = pQuery->sdata[j]->data + count * pExprInfo[j].bytes;
-
+
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (data == NULL) {
setVardataNull(dst, type);
@@ -6064,7 +6210,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
}
count += 1;
}
-
+
qTrace("QInfo:%p create tag values results completed, rows:%d", pQInfo, count);
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 143d86d5db..aa5550efcb 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -113,7 +113,9 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
SWindowResult *pResult = &pWindowResInfo->pResult[i];
if (pResult->status.closed) { // remove the window slot from hash table
- taosHashRemove(pWindowResInfo->hashList, (const char *)&pResult->window.skey, TSDB_KEYSIZE);
+      taosHashRemove(pWindowResInfo->hashList, (const char *)&pResult->window.skey,
+                     tDataTypeDesc[pWindowResInfo->type].nSize);
+
} else {
break;
}
@@ -133,14 +135,16 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
}
pWindowResInfo->size = remain;
-
+ printf("---------------size:%ld\n", taosHashGetSize(pWindowResInfo->hashList));
for (int32_t k = 0; k < pWindowResInfo->size; ++k) {
SWindowResult *pResult = &pWindowResInfo->pResult[k];
- int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->window.skey, TSDB_KEYSIZE);
+ int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->window.skey,
+ tDataTypeDesc[pWindowResInfo->type].nSize);
int32_t v = (*p - num);
assert(v >= 0 && v <= pWindowResInfo->size);
- taosHashPut(pWindowResInfo->hashList, (char *)&pResult->window.skey, TSDB_KEYSIZE, (char *)&v, sizeof(int32_t));
+ taosHashPut(pWindowResInfo->hashList, (char *)&pResult->window.skey, tDataTypeDesc[pWindowResInfo->type].nSize,
+ (char *)&v, sizeof(int32_t));
}
pWindowResInfo->curIndex = -1;
diff --git a/src/query/src/qast.c b/src/query/src/qast.c
index 0a0fe56ebd..1db965e8a3 100644
--- a/src/query/src/qast.c
+++ b/src/query/src/qast.c
@@ -138,7 +138,7 @@ static tExprNode *tExprNodeCreate(SSchema *pSchema, int32_t numOfCols, SSQLToken
memcpy(pNode->pSchema, &pSchema[i], sizeof(SSchema));
} else {
pNode->pSchema->type = TSDB_DATA_TYPE_BINARY;
- pNode->pSchema->bytes = TSDB_TABLE_NAME_LEN;
+ pNode->pSchema->bytes = TSDB_TABLE_NAME_LEN - 1;
strcpy(pNode->pSchema->name, TSQL_TBNAME_L);
pNode->pSchema->colId = -1;
}
@@ -1127,7 +1127,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
left->pSchema = pSchema;
pSchema->type = TSDB_DATA_TYPE_BINARY;
- pSchema->bytes = TSDB_TABLE_NAME_LEN;
+ pSchema->bytes = TSDB_TABLE_NAME_LEN - 1;
strcpy(pSchema->name, TSQL_TBNAME_L);
pSchema->colId = -1;
diff --git a/src/query/src/qfill.c b/src/query/src/qfill.c
index 7b3ea5c1f0..59bf7b423c 100644
--- a/src/query/src/qfill.c
+++ b/src/query/src/qfill.c
@@ -13,8 +13,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "qfill.h"
#include "os.h"
+#include "qfill.h"
#include "qextbuffer.h"
#include "taosdef.h"
#include "taosmsg.h"
@@ -58,7 +58,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, ch
}
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
- int64_t slidingTime, int32_t fillType, SFillColInfo* pFillCol) {
+ int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) {
if (fillType == TSDB_FILL_NONE) {
return NULL;
}
@@ -72,8 +72,10 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
pFillInfo->pFillCol = pFillCol;
pFillInfo->numOfTags = numOfTags;
pFillInfo->numOfCols = numOfCols;
+ pFillInfo->precision = precision;
pFillInfo->slidingTime = slidingTime;
-
+ pFillInfo->slidingUnit = slidingUnit;
+
pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
int32_t rowsize = 0;
@@ -102,9 +104,9 @@ void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp) {
pFillInfo->numOfTotal = 0;
}
-void taosDestoryFillInfo(SFillInfo* pFillInfo) {
+void* taosDestoryFillInfo(SFillInfo* pFillInfo) {
if (pFillInfo == NULL) {
- return;
+ return NULL;
}
tfree(pFillInfo->prevValues);
@@ -119,6 +121,15 @@ void taosDestoryFillInfo(SFillInfo* pFillInfo) {
tfree(pFillInfo->pFillCol);
tfree(pFillInfo);
+ return NULL;
+}
+
+static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision) {
+ if (order == TSDB_ORDER_ASC) {
+ return ekey;
+ } else {
+ return taosGetIntervalStartTimestamp(ekey, timeInterval, slidingTimeUnit, precision);
+ }
}
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) {
@@ -126,8 +137,10 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
return;
}
+ pFillInfo->endKey = taosGetRevisedEndKey(endKey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
+ pFillInfo->precision);
+
pFillInfo->rowIdx = 0;
- pFillInfo->endKey = endKey;
pFillInfo->numOfRows = numOfRows;
// ensure the space
@@ -165,36 +178,29 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
}
}
-TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision) {
- if (order == TSDB_ORDER_ASC) {
- return ekey;
- } else {
- return taosGetIntervalStartTimestamp(ekey, timeInterval, slidingTimeUnit, precision);
- }
-}
+int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) {
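+  // estimate how many rows the fill step will produce: while input rows remain,
+  // count the time intervals up to the last buffered timestamp; otherwise count
+  // the intervals up to the (revised) end key; the result is capped at maxNumOfRows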
+ int64_t* tsList = (int64_t*) pFillInfo->pData[0];
-static int32_t taosGetTotalNumOfFilledRes(SFillInfo* pFillInfo, const TSKEY* tsArray, int32_t remain,
- int64_t nInterval, int64_t ekey) {
-
- if (remain > 0) { // still fill gap within current data block, not generating data after the result set.
- TSKEY lastKey = tsArray[pFillInfo->numOfRows - 1];
- int32_t total = (int32_t)(labs(lastKey - pFillInfo->start) / nInterval) + 1;
+ int32_t numOfRows = taosNumOfRemainRows(pFillInfo);
- assert(total >= remain);
- return total;
+ TSKEY ekey1 = taosGetRevisedEndKey(ekey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
+ pFillInfo->precision);
+
+ int64_t numOfRes = -1;
+ if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
+ TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
+
+ numOfRes = (int64_t)(labs(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ assert(numOfRes >= numOfRows);
} else { // reach the end of data
- if ((ekey < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
- (ekey > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
+ if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
+ (ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
- } else {
- return (int32_t)(labs(ekey - pFillInfo->start) / nInterval) + 1;
+ } else { // the numOfRes rows are all filled with specified policy
+ numOfRes = (labs(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
}
}
-}
-int64_t taosGetNumOfResultWithFill(SFillInfo* pFillInfo, int32_t numOfRows, int64_t ekey, int32_t maxNumOfRows) {
- int32_t numOfRes = taosGetTotalNumOfFilledRes(pFillInfo, (int64_t*) pFillInfo->pData[0], numOfRows,
- pFillInfo->slidingTime, ekey);
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
}
@@ -496,8 +502,8 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) {
int32_t remain = taosNumOfRemainRows(pFillInfo); // todo use iterator?
- int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, pFillInfo->endKey, capacity);
+ int32_t rows = getFilledNumOfRes(pFillInfo, pFillInfo->endKey, capacity);
int32_t numOfRes = generateDataBlockImpl(pFillInfo, output, remain, rows, pFillInfo->pData);
assert(numOfRes == rows);
diff --git a/src/query/src/qtsbuf.c b/src/query/src/qtsbuf.c
index 555ccb7318..869299f309 100644
--- a/src/query/src/qtsbuf.c
+++ b/src/query/src/qtsbuf.c
@@ -51,7 +51,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
return NULL;
}
- strncpy(pTSBuf->path, path, PATH_MAX);
+ tstrncpy(pTSBuf->path, path, sizeof(pTSBuf->path));
pTSBuf->f = fopen(pTSBuf->path, "r+");
if (pTSBuf->f == NULL) {
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index b0a5a0bfc0..dcbcae452d 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -73,6 +73,7 @@ typedef struct {
SRpcInfo *pRpc; // associated SRpcInfo
SRpcIpSet ipSet; // ip list provided by app
void *ahandle; // handle provided by app
+ struct SRpcConn *pConn; // pConn allocated
char msgType; // message type
uint8_t *pCont; // content provided by app
int32_t contLen; // content length
@@ -339,7 +340,7 @@ void *rpcReallocCont(void *ptr, int contLen) {
return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
}
-void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) {
+void *rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) {
SRpcInfo *pRpc = (SRpcInfo *)shandle;
SRpcReqContext *pContext;
@@ -367,7 +368,7 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg)
rpcSendReqToServer(pRpc, pContext);
- return;
+ return pContext;
}
void rpcSendResponse(const SRpcMsg *pRsp) {
@@ -393,7 +394,6 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
if ( pConn->inType == 0 || pConn->user[0] == 0 ) {
tTrace("%s, connection is already released, rsp wont be sent", pConn->info);
rpcUnlockConn(pConn);
- rpcDecRef(pRpc);
return;
}
@@ -426,6 +426,10 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
rpcSendMsgToPeer(pConn, msg, msgLen);
pConn->secured = 1; // connection shall be secured
+ if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
+ pConn->pReqMsg = NULL;
+ pConn->reqMsgLen = 0;
+
rpcUnlockConn(pConn);
   rpcDecRef(pRpc);  // decrease the reference count
@@ -458,7 +462,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
pInfo->clientPort = pConn->peerPort;
// pInfo->serverIp = pConn->destIp;
- strncpy(pInfo->user, pConn->user, sizeof(pInfo->user));
+ tstrncpy(pInfo->user, pConn->user, sizeof(pInfo->user));
return 0;
}
@@ -482,6 +486,35 @@ void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pMsg, SRpcMsg
return;
}
+// this API is used by server app to keep an APP context in case connection is broken
+int rpcReportProgress(void *handle, char *pCont, int contLen) {
+ SRpcConn *pConn = (SRpcConn *)handle;
+
+ if (pConn->user[0]) {
+ // pReqMsg and reqMsgLen is re-used to store the context from app server
+ pConn->pReqMsg = pCont;
+ pConn->reqMsgLen = contLen;
+ return 0;
+ }
+
+ tTrace("%s, rpc connection is already released", pConn->info);
+ rpcFreeCont(pCont);
+ return -1;
+}
+
+/* todo: the cancel process may have a race condition, pContext may have been released
+   just before the app calls rpcCancelRequest */
+void rpcCancelRequest(void *handle) {
+ SRpcReqContext *pContext = handle;
+
+ if (pContext->pConn) {
+ tTrace("%s, app trys to cancel request", pContext->pConn->info);
+ rpcCloseConn(pContext->pConn);
+ pContext->pConn = NULL;
+ rpcFreeCont(pContext->pCont);
+ }
+}
+
static void rpcFreeMsg(void *msg) {
if ( msg ) {
char *temp = (char *)msg - sizeof(SRpcReqContext);
@@ -493,7 +526,7 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort,
SRpcConn *pConn;
uint32_t peerIp = taosGetIpFromFqdn(peerFqdn);
- if (peerIp == -1) {
+ if (peerIp == 0xFFFFFFFF) {
tError("%s, failed to resolve FQDN:%s", pRpc->label, peerFqdn);
terrno = TSDB_CODE_RPC_APP_ERROR;
return NULL;
@@ -542,7 +575,7 @@ static void rpcCloseConn(void *thandle) {
if ( pRpc->connType == TAOS_CONN_SERVER) {
char hashstr[40] = {0};
- size_t size = sprintf(hashstr, "%x:%x:%x:%d", pConn->peerIp, pConn->linkUid, pConn->peerId, pConn->connType);
+ size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pConn->peerIp, pConn->linkUid, pConn->peerId, pConn->connType);
taosHashRemove(pRpc->hash, hashstr, size);
rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg
@@ -592,7 +625,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
char hashstr[40] = {0};
SRpcHead *pHead = (SRpcHead *)pRecv->msg;
- size_t size = sprintf(hashstr, "%x:%x:%x:%d", pRecv->ip, pHead->linkUid, pHead->sourceId, pRecv->connType);
+ size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pRecv->ip, pHead->linkUid, pHead->sourceId, pRecv->connType);
// check if it is already allocated
SRpcConn **ppConn = (SRpcConn **)(taosHashGet(pRpc->hash, hashstr, size));
@@ -682,7 +715,7 @@ static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) {
if (pConn) {
pConn->tretry = 0;
pConn->ahandle = pContext->ahandle;
- sprintf(pConn->info, "%s %p %p", pRpc->label, pConn, pConn->ahandle);
+ snprintf(pConn->info, sizeof(pConn->info), "%s %p %p", pRpc->label, pConn, pConn->ahandle);
pConn->tretry = 0;
} else {
tError("%s %p, failed to set up connection(%s)", pRpc->label, pContext->ahandle, tstrerror(terrno));
@@ -811,7 +844,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
if (rpcIsReq(pHead->msgType)) {
pConn->ahandle = (void *)pHead->ahandle;
- sprintf(pConn->info, "%s %p %p", pRpc->label, pConn, pConn->ahandle);
+ snprintf(pConn->info, sizeof(pConn->info), "%s %p %p", pRpc->label, pConn, pConn->ahandle);
}
sid = pConn->sid;
@@ -846,6 +879,22 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
return pConn;
}
+static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
+ SRpcInfo *pRpc = pConn->pRpc;
+
+  // if there are pending requests, notify the app
+ tTrace("%s, notify the server app, connection is gone", pConn->info);
+
+ SRpcMsg rpcMsg;
+ rpcMsg.pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server
+ rpcMsg.contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length
+ rpcMsg.ahandle = pConn->ahandle;
+ rpcMsg.handle = pConn;
+ rpcMsg.msgType = pConn->inType;
+ rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ if (pRpc->cfp) (*(pRpc->cfp))(&rpcMsg, NULL);
+}
+
static void rpcProcessBrokenLink(SRpcConn *pConn) {
if (pConn == NULL) return;
SRpcInfo *pRpc = pConn->pRpc;
@@ -859,19 +908,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) {
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
}
- if (pConn->inType) {
- // if there are pending request, notify the app
- tTrace("%s, connection is gone, notify the app", pConn->info);
-/*
- SRpcMsg rpcMsg;
- rpcMsg.pCont = NULL;
- rpcMsg.contLen = 0;
- rpcMsg.handle = pConn;
- rpcMsg.msgType = pConn->inType;
- rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- (*(pRpc->cfp))(&rpcMsg);
-*/
- }
+ if (pConn->inType) rpcReportBrokenLinkToServer(pConn);
rpcUnlockConn(pConn);
rpcCloseConn(pConn);
@@ -920,6 +957,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
SRpcInfo *pRpc = pContext->pRpc;
+ pContext->pConn = NULL;
if (pContext->pRsp) {
// for synchronous API
memcpy(pContext->pSet, &pContext->ipSet, sizeof(SRpcIpSet));
@@ -1088,6 +1126,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) {
return;
}
+ pContext->pConn = pConn;
pConn->ahandle = pContext->ahandle;
rpcLockConn(pConn);
@@ -1210,23 +1249,10 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
static void rpcProcessIdleTimer(void *param, void *tmrId) {
SRpcConn *pConn = (SRpcConn *)param;
- SRpcInfo *pRpc = pConn->pRpc;
if (pConn->user[0]) {
tTrace("%s, close the connection since no activity", pConn->info);
- if (pConn->inType && pRpc->cfp) {
- // if there are pending request, notify the app
- tTrace("%s, notify the app, connection is gone", pConn->info);
-/*
- SRpcMsg rpcMsg;
- rpcMsg.pCont = NULL;
- rpcMsg.contLen = 0;
- rpcMsg.handle = pConn;
- rpcMsg.msgType = pConn->inType;
- rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- (*(pRpc->cfp))(&rpcMsg);
-*/
- }
+ if (pConn->inType) rpcReportBrokenLinkToServer(pConn);
rpcCloseConn(pConn);
} else {
tTrace("%s, idle timer:%p not processed", pConn->info, tmrId);
diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h
index e06778a872..f52022d79d 100644
--- a/src/tsdb/inc/tsdbMain.h
+++ b/src/tsdb/inc/tsdbMain.h
@@ -161,7 +161,7 @@ typedef struct {
int64_t index;
int numOfCacheBlocks;
SList * memPool;
-} STsdbCachePool;
+} STsdbBufferPool;
typedef struct {
TSKEY keyFirst;
@@ -173,7 +173,7 @@ typedef struct {
typedef struct {
int cacheBlockSize;
int totalCacheBlocks;
- STsdbCachePool pool;
+ STsdbBufferPool pool;
STsdbCacheBlock *curBlock;
SCacheMem * mem;
SCacheMem * imem;
@@ -357,6 +357,8 @@ typedef struct STsdbRepo {
STsdbAppH appH;
+ STsdbStat stat;
+
// The meter meta handle of this TSDB repository
STsdbMeta *tsdbMeta;
diff --git a/src/tsdb/src/tsdbCache.c b/src/tsdb/src/tsdbCache.c
index edc8472b34..24476d8997 100644
--- a/src/tsdb/src/tsdbCache.c
+++ b/src/tsdb/src/tsdbCache.c
@@ -35,7 +35,7 @@ STsdbCache *tsdbInitCache(int cacheBlockSize, int totalBlocks, TsdbRepoT *pRepo)
pCache->totalCacheBlocks = totalBlocks;
pCache->pRepo = pRepo;
- STsdbCachePool *pPool = &(pCache->pool);
+ STsdbBufferPool *pPool = &(pCache->pool);
pPool->index = 0;
pPool->memPool = tdListNew(sizeof(STsdbCacheBlock *));
if (pPool->memPool == NULL) goto _err;
@@ -106,7 +106,7 @@ static void tsdbFreeCacheMem(SCacheMem *mem) {
}
static int tsdbAllocBlockFromPool(STsdbCache *pCache) {
- STsdbCachePool *pPool = &(pCache->pool);
+ STsdbBufferPool *pPool = &(pCache->pool);
tsdbLockRepo(pCache->pRepo);
if (listNEles(pPool->memPool) == 0) {
@@ -170,7 +170,7 @@ int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
}
static int tsdbAddCacheBlockToPool(STsdbCache *pCache) {
- STsdbCachePool *pPool = &pCache->pool;
+ STsdbBufferPool *pPool = &pCache->pool;
STsdbCacheBlock *pBlock = malloc(sizeof(STsdbCacheBlock) + pCache->cacheBlockSize);
if (pBlock == NULL) return -1;
@@ -184,7 +184,7 @@ static int tsdbAddCacheBlockToPool(STsdbCache *pCache) {
}
static int tsdbRemoveCacheBlockFromPool(STsdbCache *pCache) {
- STsdbCachePool *pPool = &pCache->pool;
+ STsdbBufferPool *pPool = &pCache->pool;
STsdbCacheBlock *pBlock = NULL;
ASSERT(pCache->totalCacheBlocks >= 0);
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index 5e32a9e7d7..5526ad0d6e 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -953,6 +953,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable
static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) {
STsdbRepo *pRepo = (STsdbRepo *)repo;
STsdbMeta *pMeta = pRepo->tsdbMeta;
+ int64_t points = 0;
STableId tableId = {.uid = pBlock->uid, .tid = pBlock->tid};
STable *pTable = tsdbIsValidTableToInsert(pRepo->tsdbMeta, tableId);
@@ -964,7 +965,9 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
// Check schema version
int32_t tversion = pBlock->sversion;
- int16_t nversion = schemaVersion(tsdbGetTableSchema(pMeta, pTable));
+ STSchema * pSchema = tsdbGetTableSchema(pMeta, pTable);
+ ASSERT(pSchema != NULL);
+ int16_t nversion = schemaVersion(pSchema);
if (tversion > nversion) {
tsdbTrace("vgId:%d table:%s tid:%d server schema version %d is older than clien version %d, try to config.",
pRepo->config.tsdbId, varDataVal(pTable->name), pTable->tableId.tid, nversion, tversion);
@@ -1014,7 +1017,10 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
return -1;
}
(*affectedrows)++;
+ points++;
}
+ atomic_fetch_add_64(&(pRepo->stat.pointsWritten), points * (pSchema->numOfCols));
+ atomic_fetch_add_64(&(pRepo->stat.totalStorage), points * pSchema->vlen);
return TSDB_CODE_SUCCESS;
}
@@ -1381,3 +1387,11 @@ uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, uint32_t
return magic;
}
+
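+// report the write statistics accumulated in pRepo->stat: number of points
+// written, total storage and compressed storage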
+void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage){
+ ASSERT(repo != NULL);
+ STsdbRepo * pRepo = repo;
+ *totalPoints = pRepo->stat.pointsWritten;
+ *totalStorage = pRepo->stat.totalStorage;
+ *compStorage = pRepo->stat.compStorage;
+}
\ No newline at end of file
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 40e5667893..8eb259d38c 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -334,7 +334,7 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
pTable->schema[0] = tdDupSchema(pCfg->schema);
pTable->tagSchema = tdDupSchema(pCfg->tagSchema);
- tsize = strnlen(pCfg->sname, TSDB_TABLE_NAME_LEN);
+ tsize = strnlen(pCfg->sname, TSDB_TABLE_NAME_LEN - 1);
pTable->name = calloc(1, tsize + VARSTR_HEADER_SIZE + 1);
if (pTable->name == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -355,7 +355,7 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
pTable->tableId.tid = pCfg->tableId.tid;
pTable->lastKey = TSKEY_INITIAL_VAL;
- tsize = strnlen(pCfg->name, TSDB_TABLE_NAME_LEN);
+ tsize = strnlen(pCfg->name, TSDB_TABLE_NAME_LEN - 1);
pTable->name = calloc(1, tsize + VARSTR_HEADER_SIZE + 1);
if (pTable->name == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
diff --git a/src/tsdb/src/tsdbMetaFile.c b/src/tsdb/src/tsdbMetaFile.c
index 19fcae94e3..921db8674a 100644
--- a/src/tsdb/src/tsdbMetaFile.c
+++ b/src/tsdb/src/tsdbMetaFile.c
@@ -105,7 +105,7 @@ int32_t tsdbInsertMetaRecord(SMetaFile *mfh, uint64_t uid, void *cont, int32_t c
return -1;
}
- fsync(mfh->fd);
+ // fsync(mfh->fd);
mfh->tombSize++;
@@ -132,7 +132,7 @@ int32_t tsdbDeleteMetaRecord(SMetaFile *mfh, uint64_t uid) {
return -1;
}
- fsync(mfh->fd);
+ // fsync(mfh->fd);
mfh->nDel++;
@@ -167,7 +167,7 @@ int32_t tsdbUpdateMetaRecord(SMetaFile *mfh, uint64_t uid, void *cont, int32_t c
return -1;
}
- fsync(mfh->fd);
+ // fsync(mfh->fd);
return 0;
}
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 86ce44ad7b..a62ad5bbd3 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -72,7 +72,7 @@ typedef struct STableCheckInfo {
int32_t compSize;
int32_t numOfBlocks; // number of qualified data blocks not the original blocks
SDataCols* pDataCols;
-
+
int32_t chosen; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
SSkipListIterator* iter; // mem buffer skip list iterator
@@ -233,8 +233,6 @@ TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond*
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
-// pQueryHandle->outputCapacity = 2; // only allowed two rows to be loaded
-
changeQueryHandleForInterpQuery(pQueryHandle);
return pQueryHandle;
}
@@ -313,14 +311,14 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
rmem = SL_GET_NODE_DATA(node);
}
}
-
+
if (pCheckInfo->iiter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
if (node != NULL) {
rimem = SL_GET_NODE_DATA(node);
}
}
-
+
if (rmem != NULL && rimem != NULL) {
if (dataRowKey(rmem) < dataRowKey(rimem)) {
pCheckInfo->chosen = 0;
@@ -335,17 +333,17 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
return rimem;
}
}
-
+
if (rmem != NULL) {
pCheckInfo->chosen = 0;
return rmem;
}
-
+
if (rimem != NULL) {
pCheckInfo->chosen = 1;
return rimem;
}
-
+
return NULL;
}
@@ -355,11 +353,11 @@ bool moveToNextRow(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iter);
}
-
+
if (hasNext) {
return hasNext;
}
-
+
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
@@ -368,17 +366,17 @@ bool moveToNextRow(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iiter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iiter);
}
-
+
if (hasNext) {
return hasNext;
}
-
+
if (pCheckInfo->iter != NULL) {
return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
}
}
-
+
return hasNext;
}
@@ -397,7 +395,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
if (row == NULL) {
return false;
}
-
+
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
@@ -583,9 +581,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
-
+
int64_t st = taosGetTimestampUs();
-
+
if (pCheckInfo->pDataCols == NULL) {
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
@@ -605,67 +603,32 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
assert(pCols->numOfRows != 0);
-
+
taosArrayDestroy(sa);
tfree(data);
-
+
int64_t et = taosGetTimestampUs() - st;
tsdbTrace("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, et);
-
+
return blockLoaded;
}
static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){
SQueryFilePos* cur = &pQueryHandle->cur;
SDataBlockInfo binfo = getTrueDataBlockInfo(pCheckInfo, pBlock);
+
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
-
- TSKEY k1 = TSKEY_INITIAL_VAL, k2 = TSKEY_INITIAL_VAL;
- if (pCheckInfo->iter != NULL && tSkipListIterGet(pCheckInfo->iter) != NULL) {
- SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
-
- SDataRow row = SL_GET_NODE_DATA(node);
- k1 = dataRowKey(row);
-
- if (k1 == binfo.window.skey) {
- if (tSkipListIterNext(pCheckInfo->iter)) {
- node = tSkipListIterGet(pCheckInfo->iter);
- row = SL_GET_NODE_DATA(node);
- k1 = dataRowKey(row);
- } else {
- k1 = TSKEY_INITIAL_VAL;
- }
- }
- }
-
- if (pCheckInfo->iiter != NULL && tSkipListIterGet(pCheckInfo->iiter) != NULL) {
- SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
-
- SDataRow row = SL_GET_NODE_DATA(node);
- k2 = dataRowKey(row);
-
- if (k2 == binfo.window.skey) {
- if (tSkipListIterNext(pCheckInfo->iiter)) {
- node = tSkipListIterGet(pCheckInfo->iiter);
- row = SL_GET_NODE_DATA(node);
- k2 = dataRowKey(row);
- } else {
- k2 = TSKEY_INITIAL_VAL;
- }
- }
- }
-
+ SDataRow row = getSDataRowInTableMem(pCheckInfo);
+
+ TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
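+  // key is the timestamp of the next row still buffered in memory for this table,
+  // or TSKEY_INITIAL_VAL if the buffer is exhausted; it decides whether the buffered
+  // rows overlap this file block, so that the block has to be loaded and merged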
cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(binfo.rows-1);
- if ((ASCENDING_TRAVERSE(pQueryHandle->order) &&
- ((k1 != TSKEY_INITIAL_VAL && k1 <= binfo.window.ekey) || (k2 != TSKEY_INITIAL_VAL && k2 <= binfo.window.ekey))) ||
- (!ASCENDING_TRAVERSE(pQueryHandle->order) &&
- ((k1 != TSKEY_INITIAL_VAL && k1 >= binfo.window.skey) || (k2 != TSKEY_INITIAL_VAL && k2 >= binfo.window.skey)))) {
+ if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
+ (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
- if ((ASCENDING_TRAVERSE(pQueryHandle->order) &&
- ((k1 != TSKEY_INITIAL_VAL && k1 < binfo.window.skey) || (k2 != TSKEY_INITIAL_VAL && k2 < binfo.window.skey))) ||
- (!ASCENDING_TRAVERSE(pQueryHandle->order) &&
- (((k1 != TSKEY_INITIAL_VAL && k1 > binfo.window.skey) || (k2 != TSKEY_INITIAL_VAL && k2 > binfo.window.skey))))) {
+ if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
+ (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey))) {
+
// do not load file block into buffer
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
@@ -718,7 +681,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
return false;
}
-
+
SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0];
assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows);
@@ -756,7 +719,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
return pQueryHandle->realNumOfRows > 0;
}
-static int vnodeBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
+static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
int firstPos, lastPos, midPos = -1;
int numOfRows;
TSKEY* keyList;
@@ -868,37 +831,63 @@ static int32_t copyDataFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t cap
return numOfRows + num;
}
-static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, int32_t capacity,
- int32_t numOfRows, SDataRow row, STSchema* pSchema) {
- int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
- int32_t numOfTableCols = schemaNCols(pSchema);
-
+static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SDataRow row,
+ STsdbMeta *pMeta, int32_t numOfCols, STable* pTable) {
char* pData = NULL;
- for (int32_t i = 0; i < numOfCols; ++i) {
+
+  // the schema version info is embedded in SDataRow
+ STSchema* pSchema = tsdbGetTableSchemaByVersion(pMeta, pTable, dataRowVersion(row));
+ int32_t numOfRowCols = schemaNCols(pSchema);
+
+ int32_t i = 0, j = 0;
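+  // the merge below assumes both the query columns and the row-version schema are
+  // ordered by column id: matching columns are copied from the row, and columns
+  // missing in this row version are filled with NULL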
+ while(i < numOfCols && j < numOfRowCols) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
-
+ if (pSchema->columns[j].colId < pColInfo->info.colId) {
+ j++;
+ continue;
+ }
+
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
}
-
- int32_t offset = 0;
- for (int32_t j = 0; j < numOfTableCols; ++j) {
- if (pColInfo->info.colId == pSchema->columns[j].colId) {
- offset = pSchema->columns[j].offset;
- break;
+
+ if (pSchema->columns[j].colId == pColInfo->info.colId) {
+ void* value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
+ if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
+ memcpy(pData, value, varDataTLen(value));
+ } else {
+ memcpy(pData, value, pColInfo->info.bytes);
}
+
+ j++;
+ i++;
+    } else { // pColInfo->info.colId < pSchema->columns[j].colId, the column is missing in this row, fill with NULL
+ if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
+ setVardataNull(pData, pColInfo->info.type);
+ } else {
+ setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
+ }
+ i++;
}
-
- assert(offset != -1); // todo handle error
- void* value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset);
-
- if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
- memcpy(pData, value, varDataTLen(value));
+ }
+
+  while (i < numOfCols) { // the remaining columns are all NULL data
+ SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
+ if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
+ pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
- memcpy(pData, value, pColInfo->info.bytes);
+ pData = pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
}
+
+ if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
+ setVardataNull(pData, pColInfo->info.type);
+ } else {
+ setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
+ }
+
+ i++;
}
}
@@ -911,7 +900,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
initTableMemIterator(pQueryHandle, pCheckInfo);
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
-
+
+  // the binary search for endPos runs against the traversal direction, so the order is reversed
+ int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
+
+ int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
+ int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
+
+ STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
+ STable* pTable = pCheckInfo->pTableObj;
+
int32_t endPos = cur->pos;
if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey > blockInfo.window.ekey) {
endPos = blockInfo.rows - 1;
@@ -920,8 +918,8 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
endPos = 0;
cur->mixBlock = (cur->pos != blockInfo.rows - 1);
} else {
- int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
- endPos = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
+ assert(pCols->numOfRows > 0);
+ endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
cur->mixBlock = true;
}
@@ -933,8 +931,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t numOfRows = 0;
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
- int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
-
+
// no data in buffer, load data from file directly
if (pCheckInfo->iiter == NULL && pCheckInfo->iter == NULL) {
int32_t start = cur->pos;
@@ -950,12 +947,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
// todo opt in case of no data in buffer
numOfRows = copyDataFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
- // if the buffer is not full in case of descending order query, move the data in the front of the buffer
+ // if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (!ASCENDING_TRAVERSE(pQueryHandle->order) && numOfRows < pQueryHandle->outputCapacity) {
int32_t emptySize = pQueryHandle->outputCapacity - numOfRows;
- int32_t reqNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
-
- for(int32_t i = 0; i < reqNumOfCols; ++i) {
+
+ for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
memmove(pColInfo->pData, pColInfo->pData + emptySize * pColInfo->info.bytes, numOfRows * pColInfo->info.bytes);
}
@@ -969,20 +965,15 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
pQueryHandle->realNumOfRows = numOfRows;
cur->rows = numOfRows;
return;
- } else if (pCheckInfo->iter != NULL && pCheckInfo->iiter == NULL) {
- // } else if (pCheckInfo->iter == NULL && pCheckInfo->iiter != NULL) {
- // } else { // iter and iiter are all not NULL, three-way merge data block
- STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pCheckInfo->pTableObj);
+ } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
SSkipListNode* node = NULL;
-
do {
- node = tSkipListIterGet(pCheckInfo->iter);
- if (node == NULL) {
+ SDataRow row = getSDataRowInTableMem(pCheckInfo);
+ if (row == NULL) {
break;
}
- SDataRow row = SL_GET_NODE_DATA(node);
- TSKEY key = dataRowKey(row);
+ TSKEY key = dataRowKey(row);
if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
@@ -995,7 +986,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
- copyOneRowFromMem(pQueryHandle, pCheckInfo, pQueryHandle->outputCapacity, numOfRows, row, pSchema);
+ copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, pMeta, numOfCols, pTable);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -1005,17 +996,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
cur->lastKey = key + step;
cur->mixBlock = true;
- tSkipListIterNext(pCheckInfo->iter);
+ moveToNextRow(pCheckInfo);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
- tSkipListIterNext(pCheckInfo->iter);
+ moveToNextRow(pCheckInfo);
} else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
}
- int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
- int32_t end = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
+ int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
tSkipListIterNext(pCheckInfo->iter);
}
@@ -1093,9 +1083,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
// if the buffer is not full in case of descending order query, move the data in the front of the buffer
if (numOfRows < pQueryHandle->outputCapacity) {
int32_t emptySize = pQueryHandle->outputCapacity - numOfRows;
-
- int32_t requiredNumOfCols = taosArrayGetSize(pQueryHandle->pColumns);
- for(int32_t i = 0; i < requiredNumOfCols; ++i) {
+ for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
memmove(pColInfo->pData, pColInfo->pData + emptySize * pColInfo->info.bytes, numOfRows * pColInfo->info.bytes);
}
@@ -1224,7 +1212,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
*numOfAllocBlocks = numOfBlocks;
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
-
+
SBlockOrderSupporter sup = {0};
sup.numOfTables = numOfTables;
sup.numOfBlocksPerTable = calloc(1, sizeof(int32_t) * numOfTables);
@@ -1268,17 +1256,17 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
}
assert(numOfBlocks == cnt);
-
+
// since there is only one table qualified, blocks are not sorted
if (numOfQualTables == 1) {
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
cleanBlockOrderSupporter(&sup, numOfQualTables);
-
+
tsdbTrace("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt,
pQueryHandle->qinfo);
return TSDB_CODE_SUCCESS;
}
-
+
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qinfo);
@@ -1567,9 +1555,6 @@ void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle) {
for(int32_t i = 0; i < numOfTables; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
- if (pCheckInfo->pTableObj->tableId.uid == 12094628167747) {
- printf("abc\n");
- }
if (pCheckInfo->pTableObj->lastKey > key) {
key = pCheckInfo->pTableObj->lastKey;
index = i;
@@ -1652,9 +1637,9 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
*skey = TSKEY_INITIAL_VAL;
int64_t st = taosGetTimestampUs();
- STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pCheckInfo->pTableObj);
- int32_t numOfTableCols = schemaNCols(pSchema);
-
+ STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb);
+ STable* pTable = pCheckInfo->pTableObj;
+
do {
SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
@@ -1662,10 +1647,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
}
TSKEY key = dataRowKey(row);
-
- if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
- (key < maxKey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
-
+ if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < maxKey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
tsdbTrace("%p key:%"PRIu64" beyond qrange:%"PRId64" - %"PRId64", no more data in buffer", pQueryHandle, key, pQueryHandle->window.skey,
pQueryHandle->window.ekey);
@@ -1677,59 +1659,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
}
*ekey = key;
- char* pData = NULL;
-
- int32_t i = 0, j = 0;
- while(i < numOfCols && j < numOfTableCols) {
- SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
- if (pSchema->columns[j].colId < pColInfo->info.colId) {
- j++;
- continue;
- }
-
- if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
- pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
- } else {
- pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
- }
-
- if (pSchema->columns[j].colId == pColInfo->info.colId) {
- void* value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
- if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
- memcpy(pData, value, varDataTLen(value));
- } else {
- memcpy(pData, value, pColInfo->info.bytes);
- }
-
- j++;
- i++;
- } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data
- if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
- setVardataNull(pData, pColInfo->info.type);
- } else {
- setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
- }
- i++;
- }
- }
-
- while (i < numOfCols) { // the remain columns are all null data
- SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
- if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
- pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
- } else {
- pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
- }
-
- if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
- setVardataNull(pData, pColInfo->info.type);
- } else {
- setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
- }
-
- i++;
- }
-
+ copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, pMeta, numOfCols, pTable);
+
if (++numOfRows >= maxRowsToRead) {
moveToNextRow(pCheckInfo);
break;
@@ -1752,7 +1683,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbTrace("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d", pQueryHandle,
elapsedTime, numOfRows, numOfCols);
-
+
return numOfRows;
}
@@ -2011,7 +1942,7 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
f1 = (char*) pTable1->name;
f2 = (char*) pTable2->name;
type = TSDB_DATA_TYPE_BINARY;
- bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
+ bytes = tGetTableNameColumnSchema().bytes;
} else {
STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
bytes = pCol->bytes;
diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h
index 2369f63f16..17b3823831 100644
--- a/src/util/inc/tcache.h
+++ b/src/util/inc/tcache.h
@@ -37,8 +37,8 @@ typedef struct SCacheDataNode {
uint64_t expiredTime; // expiredTime expiredTime when this element should be remove from cache
uint64_t signature;
uint32_t size; // allocated size for current SCacheDataNode
- uint16_t keySize : 15;
- bool inTrash : 1; // denote if it is in trash or not
+ uint16_t keySize: 15;
+  bool inTrashCan: 1;  // denote if it is in the trash can or not
T_REF_DECLARE()
char *key;
char data[];
@@ -50,46 +50,49 @@ typedef struct STrashElem {
SCacheDataNode *pData;
} STrashElem;
+/*
+ * to accommodate old data in hashList that has the same key as a new entry:
+ * when a new node is put into the cache and an existing one has the same key,
+ * 1. if the old one is not referenced, update it in place;
+ * 2. otherwise, move the old one to pTrash and add the new one.
+ *
+ * a node in pTrash is released once it is no longer referenced and its survival time has expired
+ */
typedef struct {
- int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included.
- int64_t refreshTime;
-
- /*
- * to accommodate the old datanode which has the same key value of new one in hashList
- * when an new node is put into cache, if an existed one with the same key:
- * 1. if the old one does not be referenced, update it.
- * 2. otherwise, move the old one to pTrash, addedTime the new one.
- *
- * when the node in pTrash does not be referenced, it will be release at the expired expiredTime
- */
- STrashElem * pTrash;
- void * tmrCtrl;
- void * pTimer;
- SCacheStatis statistics;
- SHashObj * pHashTable;
+ int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included.
+ int64_t refreshTime;
+ STrashElem * pTrash;
+ void * tmrCtrl;
+ void * pTimer;
+ SCacheStatis statistics;
+ SHashObj * pHashTable;
_hash_free_fn_t freeFp;
- int numOfElemsInTrash; // number of element in trash
- int16_t deleting; // set the deleting flag to stop refreshing ASAP.
- T_REF_DECLARE()
+ uint32_t numOfElemsInTrash; // number of element in trash
+ uint8_t deleting; // set the deleting flag to stop refreshing ASAP.
+ pthread_t refreshWorker;
#if defined(LINUX)
pthread_rwlock_t lock;
#else
pthread_mutex_t lock;
#endif
-
} SCacheObj;
/**
- *
- * @param maxSessions maximum slots available for hash elements
- * @param tmrCtrl timer ctrl
+ * initialize the cache object
* @param refreshTime refresh operation interval time, the maximum survival time when one element is expired and
* not referenced by other objects
* @return
*/
-SCacheObj *taosCacheInit(void *tmrCtrl, int64_t refreshTimeInSeconds);
-SCacheObj *taosCacheInitWithCb(void *tmrCtrl, int64_t refreshTimeInSeconds, void (*freeCb)(void *data));
+SCacheObj *taosCacheInit(int64_t refreshTimeInSeconds);
+
+/**
+ * initialize the cache object and set the free object callback function
+ * @param refreshTimeInSeconds
+ * @param freeCb
+ * @return
+ */
+SCacheObj *taosCacheInitWithCb(int64_t refreshTimeInSeconds, void (*freeCb)(void *data));
/**
* add data into cache
diff --git a/src/util/inc/tkvstore.h b/src/util/inc/tkvstore.h
index 724c94e21d..a57d0e95cf 100644
--- a/src/util/inc/tkvstore.h
+++ b/src/util/inc/tkvstore.h
@@ -46,11 +46,12 @@ typedef struct {
} SKVStore;
int tdCreateKVStore(char *fname);
-int tdDestroyKVStore();
+int tdDestroyKVStore(char *fname);
SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH);
void tdCloseKVStore(SKVStore *pStore);
int tdKVStoreStartCommit(SKVStore *pStore);
-int tdUpdateRecordInKVStore(SKVStore *pStore, uint64_t uid, void *cont, int contLen);
+int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLen);
+int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid);
int tdKVStoreEndCommit(SKVStore *pStore);
#ifdef __cplusplus
diff --git a/src/util/inc/ttime.h b/src/util/inc/ttime.h
index 61df65f345..576c9a51f6 100644
--- a/src/util/inc/ttime.h
+++ b/src/util/inc/ttime.h
@@ -56,7 +56,7 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
-int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec);
+int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight);
void deltaToUtcInitOnce();
#ifdef __cplusplus
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index b0291b5cc0..ac06cf4f3f 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -77,31 +77,7 @@ static FORCE_INLINE void taosFreeNode(void *data) {
 * @param lifespan total survival expiredTime from now
* @return SCacheDataNode
*/
-static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size,
- uint64_t duration) {
- size_t totalSize = size + sizeof(SCacheDataNode) + keyLen + 1;
-
- SCacheDataNode *pNewNode = calloc(1, totalSize);
- if (pNewNode == NULL) {
- uError("failed to allocate memory, reason:%s", strerror(errno));
- return NULL;
- }
-
- memcpy(pNewNode->data, pData, size);
-
- pNewNode->key = (char *)pNewNode + sizeof(SCacheDataNode) + size;
- pNewNode->keySize = keyLen;
-
- memcpy(pNewNode->key, key, keyLen);
-
- pNewNode->addedTime = (uint64_t)taosGetTimestampMs();
- pNewNode->expiredTime = pNewNode->addedTime + duration;
-
- pNewNode->signature = (uint64_t)pNewNode;
- pNewNode->size = (uint32_t)totalSize;
-
- return pNewNode;
-}
+static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, uint64_t duration);
/**
* addedTime object node into trash, and this object is closed for referencing if it is addedTime to trash
@@ -109,50 +85,15 @@ static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const
* @param pCacheObj Cache object
* @param pNode Cache slot object
*/
-static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
- if (pNode->inTrash) { /* node is already in trash */
- return;
- }
-
- STrashElem *pElem = calloc(1, sizeof(STrashElem));
- pElem->pData = pNode;
-
- pElem->next = pCacheObj->pTrash;
- if (pCacheObj->pTrash) {
- pCacheObj->pTrash->prev = pElem;
- }
-
- pElem->prev = NULL;
- pCacheObj->pTrash = pElem;
-
- pNode->inTrash = true;
- pCacheObj->numOfElemsInTrash++;
-
- uTrace("key:%s %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
-}
+static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode);
+
+/**
+ * remove a node from the trash can
+ * @param pCacheObj
+ * @param pElem
+ */
+static void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem);
-static void taosRemoveFromTrash(SCacheObj *pCacheObj, STrashElem *pElem) {
- if (pElem->pData->signature != (uint64_t)pElem->pData) {
- uError("key:sig:%d %p data has been released, ignore", pElem->pData->signature, pElem->pData);
- return;
- }
-
- pCacheObj->numOfElemsInTrash--;
- if (pElem->prev) {
- pElem->prev->next = pElem->next;
- } else { /* pnode is the header, update header */
- pCacheObj->pTrash = pElem->next;
- }
-
- if (pElem->next) {
- pElem->next->prev = pElem->prev;
- }
-
- pElem->pData->signature = 0;
- if (pCacheObj->freeFp) pCacheObj->freeFp(pElem->pData->data);
- free(pElem->pData);
- free(pElem);
-}
/**
* remove nodes in trash with refCount == 0 in cache
* @param pNode
@@ -160,42 +101,7 @@ static void taosRemoveFromTrash(SCacheObj *pCacheObj, STrashElem *pElem) {
* @param force force model, if true, remove data in trash without check refcount.
* may cause corruption. So, forece model only applys before cache is closed
*/
-static void taosTrashEmpty(SCacheObj *pCacheObj, bool force) {
- __cache_wr_lock(pCacheObj);
-
- if (pCacheObj->numOfElemsInTrash == 0) {
- if (pCacheObj->pTrash != NULL) {
- uError("key:inconsistency data in cache, numOfElem in trash:%d", pCacheObj->numOfElemsInTrash);
- }
- pCacheObj->pTrash = NULL;
-
- __cache_unlock(pCacheObj);
- return;
- }
-
- STrashElem *pElem = pCacheObj->pTrash;
-
- while (pElem) {
- T_REF_VAL_CHECK(pElem->pData);
- if (pElem->next == pElem) {
- pElem->next = NULL;
- }
-
- if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
- uTrace("key:%s %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
- pCacheObj->numOfElemsInTrash - 1);
- STrashElem *p = pElem;
-
- pElem = pElem->next;
- taosRemoveFromTrash(pCacheObj, p);
- } else {
- pElem = pElem->next;
- }
- }
-
- assert(pCacheObj->numOfElemsInTrash >= 0);
- __cache_unlock(pCacheObj);
-}
+static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force);
/**
* release node
@@ -304,87 +210,20 @@ static FORCE_INLINE SCacheDataNode *taosAddToCacheImpl(SCacheObj *pCacheObj, con
return pNode;
}
-static void doCleanupDataCache(SCacheObj *pCacheObj) {
- __cache_wr_lock(pCacheObj);
-
- //if (taosHashGetSize(pCacheObj->pHashTable) > 0) {
- taosHashCleanup(pCacheObj->pHashTable);
- //}
-
- __cache_unlock(pCacheObj);
-
- taosTrashEmpty(pCacheObj, true);
- __cache_lock_destroy(pCacheObj);
-
- memset(pCacheObj, 0, sizeof(SCacheObj));
- free(pCacheObj);
-}
+/**
+ * clean up the cache object and release its resources
+ * @param pCacheObj
+ */
+static void doCleanupDataCache(SCacheObj *pCacheObj);
/**
* refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime
* @param handle Cache object handle
*/
-static void taosCacheRefresh(void *handle, void *tmrId) {
- SCacheObj *pCacheObj = (SCacheObj *)handle;
-
- if (pCacheObj == NULL || T_REF_VAL_GET(pCacheObj) == 0) {
- uTrace("object is destroyed. no refresh retry");
- return;
- }
-
- int16_t ref = T_REF_INC(pCacheObj);
- if (ref == 1) {
- T_REF_DEC(pCacheObj);
- return;
- }
-
- // todo add the ref before start the timer
- int32_t num = taosHashGetSize(pCacheObj->pHashTable);
- if (num == 0) {
- ref = T_REF_DEC(pCacheObj);
- if (ref == 0) {
- doCleanupDataCache(pCacheObj);
- } else {
- taosTmrReset(taosCacheRefresh, pCacheObj->refreshTime, pCacheObj, pCacheObj->tmrCtrl, &pCacheObj->pTimer);
- }
- return;
- }
-
- uint64_t expiredTime = taosGetTimestampMs();
- pCacheObj->statistics.refreshCount++;
-
- SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
-
- __cache_wr_lock(pCacheObj);
- while (taosHashIterNext(pIter)) {
- if (pCacheObj->deleting == 1) {
- taosHashDestroyIter(pIter);
- break;
- }
-
- SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
- if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
- taosCacheReleaseNode(pCacheObj, pNode);
- }
- }
-
- __cache_unlock(pCacheObj);
-
- taosHashDestroyIter(pIter);
+static void* taosCacheRefresh(void *handle);
- taosTrashEmpty(pCacheObj, false);
-
- ref = T_REF_DEC(pCacheObj);
- if (ref == 0) {
- doCleanupDataCache(pCacheObj);
- return;
- } else {
- taosTmrReset(taosCacheRefresh, pCacheObj->refreshTime, pCacheObj, pCacheObj->tmrCtrl, &pCacheObj->pTimer);
- }
-}
-
-SCacheObj *taosCacheInitWithCb(void *tmrCtrl, int64_t refreshTime, void (*freeCb)(void *data)) {
- if (tmrCtrl == NULL || refreshTime <= 0) {
+SCacheObj *taosCacheInitWithCb(int64_t refreshTime, void (*freeCb)(void *data)) {
+ if (refreshTime <= 0) {
return NULL;
}
@@ -394,7 +233,7 @@ SCacheObj *taosCacheInitWithCb(void *tmrCtrl, int64_t refreshTime, void (*freeCb
return NULL;
}
- pCacheObj->pHashTable = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false);
+ pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false);
if (pCacheObj->pHashTable == NULL) {
free(pCacheObj);
uError("failed to allocate memory, reason:%s", strerror(errno));
@@ -406,25 +245,27 @@ SCacheObj *taosCacheInitWithCb(void *tmrCtrl, int64_t refreshTime, void (*freeCb
pCacheObj->freeFp = freeCb;
pCacheObj->refreshTime = refreshTime * 1000;
- pCacheObj->tmrCtrl = tmrCtrl;
-
- taosTmrReset(taosCacheRefresh, pCacheObj->refreshTime, pCacheObj, pCacheObj->tmrCtrl, &pCacheObj->pTimer);
-
+
if (__cache_lock_init(pCacheObj) != 0) {
- taosTmrStopA(&pCacheObj->pTimer);
taosHashCleanup(pCacheObj->pHashTable);
free(pCacheObj);
uError("failed to init lock, reason:%s", strerror(errno));
return NULL;
}
-
- T_REF_INC(pCacheObj);
+
+ pthread_attr_t thattr;
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+
+ pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheRefresh, pCacheObj);
+
+ pthread_attr_destroy(&thattr);
return pCacheObj;
}
-SCacheObj *taosCacheInit(void *tmrCtrl, int64_t refreshTime) {
- return taosCacheInitWithCb(tmrCtrl, refreshTime, NULL);
+SCacheObj *taosCacheInit(int64_t refreshTime) {
+ return taosCacheInitWithCb(refreshTime, NULL);
}
void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, size_t dataSize, int duration) {
@@ -600,16 +441,188 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
__cache_unlock(pCacheObj);
taosHashDestroyIter(pIter);
- taosTrashEmpty(pCacheObj, false);
+ taosTrashCanEmpty(pCacheObj, false);
}
void taosCacheCleanup(SCacheObj *pCacheObj) {
if (pCacheObj == NULL) {
return;
}
-
- int32_t ref = T_REF_DEC(pCacheObj);
- if (ref == 0) {
- doCleanupDataCache(pCacheObj);
- }
+
+ pCacheObj->deleting = 1;
+ pthread_join(pCacheObj->refreshWorker, NULL);
+
+ doCleanupDataCache(pCacheObj);
+}
+
+SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size,
+ uint64_t duration) {
+ size_t totalSize = size + sizeof(SCacheDataNode) + keyLen + 1;
+
+ SCacheDataNode *pNewNode = calloc(1, totalSize);
+ if (pNewNode == NULL) {
+ uError("failed to allocate memory, reason:%s", strerror(errno));
+ return NULL;
+ }
+
+ memcpy(pNewNode->data, pData, size);
+
+ pNewNode->key = (char *)pNewNode + sizeof(SCacheDataNode) + size;
+ pNewNode->keySize = keyLen;
+
+ memcpy(pNewNode->key, key, keyLen);
+
+ pNewNode->addedTime = (uint64_t)taosGetTimestampMs();
+ pNewNode->expiredTime = pNewNode->addedTime + duration;
+
+ pNewNode->signature = (uint64_t)pNewNode;
+ pNewNode->size = (uint32_t)totalSize;
+
+ return pNewNode;
+}
+
+void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
+ if (pNode->inTrashCan) { /* node is already in trash */
+ return;
+ }
+
+ STrashElem *pElem = calloc(1, sizeof(STrashElem));
+ pElem->pData = pNode;
+
+ pElem->next = pCacheObj->pTrash;
+ if (pCacheObj->pTrash) {
+ pCacheObj->pTrash->prev = pElem;
+ }
+
+ pElem->prev = NULL;
+ pCacheObj->pTrash = pElem;
+
+ pNode->inTrashCan = true;
+ pCacheObj->numOfElemsInTrash++;
+
+ uTrace("key:%s %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
+}
+
+void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
+ if (pElem->pData->signature != (uint64_t)pElem->pData) {
+ uError("key:sig:%d %p data has been released, ignore", pElem->pData->signature, pElem->pData);
+ return;
+ }
+
+ pCacheObj->numOfElemsInTrash--;
+ if (pElem->prev) {
+ pElem->prev->next = pElem->next;
+ } else { /* pnode is the header, update header */
+ pCacheObj->pTrash = pElem->next;
+ }
+
+ if (pElem->next) {
+ pElem->next->prev = pElem->prev;
+ }
+
+ pElem->pData->signature = 0;
+ if (pCacheObj->freeFp) pCacheObj->freeFp(pElem->pData->data);
+ free(pElem->pData);
+ free(pElem);
+}
+
+void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
+ __cache_wr_lock(pCacheObj);
+
+ if (pCacheObj->numOfElemsInTrash == 0) {
+ if (pCacheObj->pTrash != NULL) {
+ uError("key:inconsistency data in cache, numOfElem in trash:%d", pCacheObj->numOfElemsInTrash);
+ }
+ pCacheObj->pTrash = NULL;
+
+ __cache_unlock(pCacheObj);
+ return;
+ }
+
+ STrashElem *pElem = pCacheObj->pTrash;
+
+ while (pElem) {
+ T_REF_VAL_CHECK(pElem->pData);
+ if (pElem->next == pElem) {
+ pElem->next = NULL;
+ }
+
+ if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
+ uTrace("key:%s %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
+ pCacheObj->numOfElemsInTrash - 1);
+ STrashElem *p = pElem;
+
+ pElem = pElem->next;
+ taosRemoveFromTrashCan(pCacheObj, p);
+ } else {
+ pElem = pElem->next;
+ }
+ }
+
+ assert(pCacheObj->numOfElemsInTrash >= 0);
+ __cache_unlock(pCacheObj);
+}
+
+void doCleanupDataCache(SCacheObj *pCacheObj) {
+ __cache_wr_lock(pCacheObj);
+ taosHashCleanup(pCacheObj->pHashTable);
+ __cache_unlock(pCacheObj);
+
+ taosTrashCanEmpty(pCacheObj, true);
+ __cache_lock_destroy(pCacheObj);
+
+ memset(pCacheObj, 0, sizeof(SCacheObj));
+ free(pCacheObj);
+}
+
+void* taosCacheRefresh(void *handle) {
+ SCacheObj *pCacheObj = (SCacheObj *)handle;
+ if (pCacheObj == NULL) {
+ uTrace("object is destroyed. no refresh retry");
+ return NULL;
+ }
+
+ const int32_t SLEEP_DURATION = 500; //500 ms
+ int64_t totalTick = pCacheObj->refreshTime / SLEEP_DURATION;
+
+ int64_t count = 0;
+ while(1) {
+ taosMsleep(SLEEP_DURATION);
+
+ // check every 500 ms whether the current cache object is being deleted.
+ if (pCacheObj->deleting) {
+ break;
+ }
+
+ if (++count < totalTick) {
+ continue;
+ }
+
+ // reset the count value
+ count = 0;
+ size_t num = taosHashGetSize(pCacheObj->pHashTable);
+ if (num == 0) {
+ continue;
+ }
+
+ uint64_t expiredTime = taosGetTimestampMs();
+ pCacheObj->statistics.refreshCount++;
+
+ SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
+
+ __cache_wr_lock(pCacheObj);
+ while (taosHashIterNext(pIter)) {
+ SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
+ if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
+ taosCacheReleaseNode(pCacheObj, pNode);
+ }
+ }
+
+ __cache_unlock(pCacheObj);
+
+ taosHashDestroyIter(pIter);
+ taosTrashCanEmpty(pCacheObj, false);
+ }
+
+ return NULL;
}
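The refresh path now follows a plain flag-plus-join shutdown pattern instead of timer resets and refcounting. A generic sketch of that pattern in isolation (names and the volatile qualifier are illustrative, not from the source):

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>

typedef struct { volatile uint8_t deleting; } worker_ctx_t;   /* illustrative stand-in for SCacheObj */

static void *refreshLoop(void *arg) {
  worker_ctx_t *ctx = arg;
  while (!ctx->deleting) {
    usleep(500 * 1000);        /* poll the stop flag every 500 ms, as the patch does */
    /* ... periodic eviction work ... */
  }
  return NULL;
}

static void shutdownWorker(worker_ctx_t *ctx, pthread_t tid) {
  ctx->deleting = 1;           /* ask the worker to stop */
  pthread_join(tid, NULL);     /* taosCacheCleanup() blocks here, for up to ~500 ms */
}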
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 203a34fd15..543a84dc44 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -242,7 +242,7 @@ void taosReadGlobalLogCfg() {
wordexp_t full_path;
wordexp(configDir, &full_path, 0);
if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL) {
- if (strlen(full_path.we_wordv[0]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(full_path.we_wordv[0]) >= TSDB_FILENAME_LEN) {
printf("\nconfig file: %s path overflow max len %d, all variables are set to default\n", full_path.we_wordv[0], TSDB_FILENAME_LEN - 1);
wordfree(&full_path);
return;
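The tightened comparison matters because the expanded path is stored in a TSDB_FILENAME_LEN-sized buffer: a string of exactly TSDB_FILENAME_LEN characters would leave no room for the terminator. A minimal illustration (the buffer size 16 stands in for TSDB_FILENAME_LEN):

#include <string.h>

static int copyPathChecked(char dst[16], const char *src) {
  if (strlen(src) >= 16) return -1;    /* too long: no room left for the '\0' terminator */
  strcpy(dst, src);                    /* safe: at most 15 characters plus '\0' */
  return 0;
}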
diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c
index 148d8235a6..88cd446349 100644
--- a/src/util/src/tkvstore.c
+++ b/src/util/src/tkvstore.c
@@ -34,42 +34,67 @@
#define TD_KVSTORE_SNAP_SUFFIX ".snap"
#define TD_KVSTORE_NEW_SUFFIX ".new"
+typedef struct {
+ uint64_t uid;
+ int64_t offset;
+ int64_t size;
+} SKVRecord;
+
static int tdInitKVStoreHeader(int fd, char *fname);
static void * tdEncodeStoreInfo(void *buf, SStoreInfo *pInfo);
-// static void * tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo);
+static void * tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo);
static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH);
static char * tdGetKVStoreSnapshotFname(char *fdata);
static char * tdGetKVStoreNewFname(char *fdata);
static void tdFreeKVStore(SKVStore *pStore);
static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo);
+static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo);
+static void * tdEncodeKVRecord(void *buf, SKVRecord *pRecord);
+static void * tdDecodeKVRecord(void *buf, SKVRecord *pRecord);
+static int tdRestoreKVStore(SKVStore *pStore);
int tdCreateKVStore(char *fname) {
- char *tname = strdup(fname);
- if (tname == NULL) return TSDB_CODE_COM_OUT_OF_MEMORY;
-
int fd = open(fname, O_RDWR | O_CREAT, 0755);
if (fd < 0) {
uError("failed to open file %s since %s", fname, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
}
- int code = tdInitKVStoreHeader(fd, fname);
- if (code != TSDB_CODE_SUCCESS) return code;
+ if (tdInitKVStoreHeader(fd, fname) < 0) {
+ close(fd);
+ return -1;
+ }
if (fsync(fd) < 0) {
uError("failed to fsync file %s since %s", fname, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ close(fd);
+ return -1;
}
if (close(fd) < 0) {
uError("failed to close file %s since %s", fname, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
}
- return TSDB_CODE_SUCCESS;
+ return 0;
+}
+
+int tdDestroyKVStore(char *fname) {
+ if (remove(fname) < 0) {
+ uError("failed to remove file %s since %s", fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
+ return 0;
}
SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH) {
+ SStoreInfo info = {0};
+
SKVStore *pStore = tdNewKVStore(fname, iFunc, aFunc, appH);
if (pStore == NULL) return NULL;
@@ -89,14 +114,44 @@ SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH
goto _err;
}
- // TODO: rewind the file
+ if (tdLoadKVStoreHeader(pStore->sfd, pStore->fsnap, &info) < 0) goto _err;
+
+ if (ftruncate(pStore->fd, info.size) < 0) {
+ uError("failed to truncate %s to " PRId64 " size since %s", pStore->fname, info.size, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ if (tdUpdateKVStoreHeader(pStore->fd, pStore->fname, &info) < 0) goto _err;
close(pStore->sfd);
pStore->sfd = -1;
remove(pStore->fsnap);
}
- // TODO: Recover from the file
+ if (tdLoadKVStoreHeader(pStore->fd, pStore->fname, &info) < 0) goto _err;
+
+ struct stat tfstat;
+ if (fstat(pStore->fd, &tfstat) < 0) {
+ uError("failed to fstat file %s since %s", pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ ASSERT(info.size == tfstat.st_size);
+
+ if (lseek(pStore->fd, TD_KVSTORE_HEADER_SIZE, SEEK_SET) < 0) {
+ uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ pStore->info.size += TD_KVSTORE_HEADER_SIZE;
+
+ if (tdRestoreKVStore(pStore) < 0) goto _err;
+
+ close(pStore->fd);
+ pStore->fd = -1;
return pStore;
@@ -113,7 +168,11 @@ _err:
return NULL;
}
+void tdCloseKVStore(SKVStore *pStore) { tdFreeKVStore(pStore); }
+
int tdKVStoreStartCommit(SKVStore *pStore) {
+ ASSERT(pStore->fd < 0);
+
pStore->fd = open(pStore->fname, O_RDWR);
if (pStore->fd < 0) {
uError("failed to open file %s since %s", pStore->fname, strerror(errno));
@@ -147,6 +206,14 @@ int tdKVStoreStartCommit(SKVStore *pStore) {
}
pStore->sfd = -1;
+ if (lseek(pStore->fd, 0, SEEK_END) < 0) {
+ uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ ASSERT(pStore->info.size == lseek(pStore->fd, 0, SEEK_CUR));
+
return 0;
_err:
@@ -162,11 +229,50 @@ _err:
return -1;
}
+int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLen) {
+ SKVRecord *pRecord = taosHashGet(pStore->map, (void *)&uid, sizeof(uid));
+ if (pRecord != NULL) {
+ pStore->info.tombSize += (pRecord->size + sizeof(SKVRecord));
+ }
+
+ // TODO
+ return 0;
+}
+
+int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
+ SKVRecord rInfo = {0};
+ char buf[128] = "\0";
+
+ SKVRecord *pRecord = taosHashGet(pStore->map, &uid, sizeof(uid));
+ if (pRecord == NULL) {
+ uError("failed to drop KV store record with key " PRIu64 " since not find", uid);
+ return -1;
+ }
+
+ rInfo.offset = -pRecord->offset;
+ rInfo.uid = pRecord->uid;
+ rInfo.size = pRecord->size;
+
+ void *pBuf = tdEncodeKVRecord(buf, &rInfo);
+
+ if (twrite(pStore->fd, buf, POINTER_DISTANCE(pBuf, buf)) < POINTER_DISTANCE(pBuf, buf)) {
+ uError("failed to write %d bytes to file %s since %s", POINTER_DISTANCE(pBuf, buf), pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
+ pStore->info.size += POINTER_DISTANCE(pBuf, buf);
+ pStore->info.nDels++;
+ pStore->info.nRecords--;
+ pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2);
+
+ return 0;
+}
+
int tdKVStoreEndCommit(SKVStore *pStore) {
ASSERT(pStore->fd > 0);
- terrno = tdUpdateKVStoreHeader(pStore->fd, pStore->fname, &(pStore->info));
- if (terrno != TSDB_CODE_SUCCESS) return -1;
+ if (tdUpdateKVStoreHeader(pStore->fd, pStore->fname, &(pStore->info)) < 0) return -1;
if (fsync(pStore->fd) < 0) {
uError("failed to fsync file %s since %s", pStore->fname, strerror(errno));
@@ -179,27 +285,56 @@ int tdKVStoreEndCommit(SKVStore *pStore) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
+ pStore->fd = -1;
remove(pStore->fsnap);
return 0;
}
+static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) {
+ char buf[TD_KVSTORE_HEADER_SIZE] = "\0";
+
+ if (lseek(fd, 0, SEEK_SET) < 0) {
+ uError("failed to lseek file %s since %s", fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
+ if (tread(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) {
+ uError("failed to read %d bytes from file %s since %s", TD_KVSTORE_HEADER_SIZE, fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
+ if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) {
+ uError("file %s is broken", fname);
+ terrno = TSDB_CODE_COM_FILE_CORRUPTED;
+ return -1;
+ }
+
+ tdDecodeStoreInfo(buf, pInfo);
+
+ return 0;
+}
+
static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) {
char buf[TD_KVSTORE_HEADER_SIZE] = "\0";
if (lseek(fd, 0, SEEK_SET) < 0) {
uError("failed to lseek file %s since %s", fname, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
}
tdEncodeStoreInfo(buf, pInfo);
taosCalcChecksumAppend(0, (uint8_t *)buf, TD_KVSTORE_HEADER_SIZE);
if (twrite(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) {
- uError("failed to write file %s %d bytes since %s", fname, TD_KVSTORE_HEADER_SIZE, strerror(errno));
- return TAOS_SYSTEM_ERROR(errno);
+ uError("failed to write %d bytes to file %s since %s", TD_KVSTORE_HEADER_SIZE, fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
}
- return TSDB_CODE_SUCCESS;
+ return 0;
}
static int tdInitKVStoreHeader(int fd, char *fname) {
@@ -217,21 +352,24 @@ static void *tdEncodeStoreInfo(void *buf, SStoreInfo *pInfo) {
return buf;
}
-// static void *tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo) {
-// buf = taosDecodeVariantI64(buf, &(pInfo->size));
-// buf = taosDecodeVariantI64(buf, &(pInfo->tombSize));
-// buf = taosDecodeVariantI64(buf, &(pInfo->nRecords));
-// buf = taosDecodeVariantI64(buf, &(pInfo->nDels));
+static void *tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo) {
+ buf = taosDecodeVariantI64(buf, &(pInfo->size));
+ buf = taosDecodeVariantI64(buf, &(pInfo->tombSize));
+ buf = taosDecodeVariantI64(buf, &(pInfo->nRecords));
+ buf = taosDecodeVariantI64(buf, &(pInfo->nDels));
-// return buf;
-// }
+ return buf;
+}
static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH) {
SKVStore *pStore = (SKVStore *)malloc(sizeof(SKVStore));
if (pStore == NULL) goto _err;
pStore->fname = strdup(fname);
- if (pStore->map == NULL) goto _err;
+ if (pStore->map == NULL) {
+ terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
+ goto _err;
+ }
pStore->fsnap = tdGetKVStoreSnapshotFname(fname);
if (pStore->fsnap == NULL) goto _err;
@@ -254,7 +392,6 @@ static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void
return pStore;
_err:
- terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
tdFreeKVStore(pStore);
return NULL;
}
@@ -289,4 +426,116 @@ static char *tdGetKVStoreNewFname(char *fdata) {
}
sprintf(fname, "%s%s", fdata, TD_KVSTORE_NEW_SUFFIX);
return fname;
+}
+
+static void *tdEncodeKVRecord(void *buf, SKVRecord *pRecord) {
+ buf = taosEncodeFixedU64(buf, pRecord->uid);
+ buf = taosEncodeFixedI64(buf, pRecord->offset);
+ buf = taosEncodeFixedI64(buf, pRecord->size);
+
+ return buf;
+}
+
+static void *tdDecodeKVRecord(void *buf, SKVRecord *pRecord) {
+ buf = taosDecodeFixedU64(buf, &(pRecord->uid));
+ buf = taosDecodeFixedI64(buf, &(pRecord->offset));
+ buf = taosDecodeFixedI64(buf, &(pRecord->size));
+
+ return buf;
+}
+
+static int tdRestoreKVStore(SKVStore *pStore) {
+ char tbuf[128] = "\0";
+ void * buf = NULL;
+ int maxBufSize = 0;
+ SKVRecord rInfo = {0};
+
+ ASSERT(TD_KVSTORE_HEADER_SIZE == lseek(pStore->fd, 0, SEEK_CUR));
+
+ while (true) {
+ ssize_t tsize = tread(pStore->fd, tbuf, sizeof(SKVRecord));
+ if (tsize == 0) break;
+ if (tsize < sizeof(SKVRecord)) {
+ uError("failed to read %d bytes from file %s since %s", sizeof(SKVRecord), pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ char *pBuf = tdDecodeKVRecord(tbuf, &rInfo);
+ ASSERT(POINTER_DISTANCE(pBuf, tbuf) == sizeof(SKVRecord));
+ ASSERT(rInfo.offset > 0 ? pStore->info.size == rInfo.offset : true);
+
+ if (rInfo.offset < 0) {
+ taosHashRemove(pStore->map, (void *)(&rInfo.uid), sizeof(rInfo.uid));
+ pStore->info.size += sizeof(SKVRecord);
+ pStore->info.nRecords--;
+ pStore->info.nDels++;
+ pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) + sizeof(SKVRecord));
+ } else {
+ // TODO: add statistics
+ ASSERT(rInfo.offset > 0 && rInfo.size > 0);
+ if (taosHashPut(pStore->map, (void *)(&rInfo.uid), sizeof(rInfo.uid), &rInfo, sizeof(rInfo)) < 0) {
+ uError("failed to put record in KV store %s", pStore->fname);
+ terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ maxBufSize = MAX(maxBufSize, rInfo.size);
+
+ if (lseek(pStore->fd, rInfo.size, SEEK_CUR) < 0) {
+ uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ }
+ }
+
+ buf = malloc(maxBufSize);
+ if (buf == NULL) {
+ uError("failed to allocate %d bytes in KV store %s", maxBufSize, pStore->fname);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ SHashMutableIterator *pIter = taosHashCreateIter(pStore->map);
+ if (pIter == NULL) {
+ uError("failed to create hash iter while opening KV store %s", pStore->fname);
+ terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ while (taosHashIterNext(pIter)) {
+ SKVRecord *pRecord = taosHashIterGet(pIter);
+
+ if (lseek(pStore->fd, pRecord->offset, SEEK_SET) < 0) {
+ uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ if (tread(pStore->fd, buf, pRecord->size) < pRecord->size) {
+ uError("failed to read %d bytes from file %s since %s", pRecord->size, pStore->fname, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ if (!taosCheckChecksumWhole((uint8_t *)buf, pRecord->size)) {
+ uError("file %s has checksum error, offset " PRId64 " size %d", pStore->fname, pRecord->offset, pRecord->size);
+ terrno = TSDB_CODE_COM_FILE_CORRUPTED;
+ goto _err;
+ }
+
+ if (pStore->iFunc) (*pStore->iFunc)(pStore->appH, buf, pRecord->size);
+ }
+
+ taosHashDestroyIter(pIter);
+
+ if (pStore->aFunc) (*pStore->aFunc)(pStore->appH);
+
+ tfree(buf);
+ return 0;
+
+_err:
+ tfree(buf);
+ return -1;
}
\ No newline at end of file
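For clarity on the drop path added above: a drop is recorded by appending another SKVRecord whose offset is the negation of the live record's offset, and tdRestoreKVStore treats a negative offset as a deletion when it replays the file. A condensed sketch of what tdDropKVStoreRecord writes (buffer size illustrative):

SKVRecord tomb = {
  .uid    = pRecord->uid,
  .offset = -pRecord->offset,   /* negative offset marks a deletion during restore */
  .size   = pRecord->size,
};
char buf[128] = "\0";
void *end = tdEncodeKVRecord(buf, &tomb);   /* fixed-width u64 + i64 + i64 encoding */
/* twrite(fd, buf, POINTER_DISTANCE(end, buf)) then appends the tombstone to the store file */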
diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c
index 898ab70876..6fc767fd3b 100644
--- a/src/util/src/tsched.c
+++ b/src/util/src/tsched.c
@@ -15,6 +15,7 @@
#include "os.h"
#include "taosdef.h"
+#include "tutil.h"
#include "tulog.h"
#include "tsched.h"
#include "ttimer.h"
@@ -62,8 +63,7 @@ void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) {
}
pSched->queueSize = queueSize;
- strncpy(pSched->label, label, sizeof(pSched->label)); // fix buffer overflow
- pSched->label[sizeof(pSched->label)-1] = '\0';
+ tstrncpy(pSched->label, label, sizeof(pSched->label)); // fix buffer overflow
pSched->fullSlot = 0;
pSched->emptySlot = 0;
diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c
index 00c8bba94e..1f4d57115b 100644
--- a/src/util/src/tsocket.c
+++ b/src/util/src/tsocket.c
@@ -29,11 +29,12 @@ int taosGetFqdn(char *fqdn) {
hints.ai_flags = AI_CANONNAME;
- getaddrinfo(hostname, NULL, &hints, &result);
+ int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
if (result) {
strcpy(fqdn, result->ai_canonname);
freeaddrinfo(result);
} else {
+ uError("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
code = -1;
}
@@ -42,9 +43,12 @@ int taosGetFqdn(char *fqdn) {
uint32_t taosGetIpFromFqdn(const char *fqdn) {
struct addrinfo hints = {0};
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+
struct addrinfo *result = NULL;
- getaddrinfo(fqdn, NULL, &hints, &result);
+ int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
if (result) {
struct sockaddr *sa = result->ai_addr;
struct sockaddr_in *si = (struct sockaddr_in*)sa;
@@ -53,7 +57,8 @@ uint32_t taosGetIpFromFqdn(const char *fqdn) {
freeaddrinfo(result);
return ip;
} else {
- return -1;
+ uError("failed get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+ return 0xFFFFFFFF;
}
}
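Callers of taosGetIpFromFqdn should now test for the 0xFFFFFFFF sentinel rather than -1. A sketch (the host name is hypothetical; 0xFFFFFFFF matches INADDR_NONE on common platforms, which is an assumption, not something this patch states):

uint32_t ip = taosGetIpFromFqdn("node1.example.com");   /* hypothetical host */
if (ip == 0xFFFFFFFF) {
  /* resolution failed; the reason has already been logged via gai_strerror() */
}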
diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c
index 8355ed9dc1..4dd6360752 100644
--- a/src/util/src/ttime.c
+++ b/src/util/src/ttime.c
@@ -24,7 +24,6 @@
#include "taosdef.h"
#include "ttime.h"
#include "tutil.h"
-
/*
* mktime64 - Converts date to seconds.
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
@@ -119,15 +118,21 @@ static int month[12] = {
static int64_t parseFraction(char* str, char** end, int32_t timePrec);
static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec);
static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec);
+static int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec);
+
+static int32_t (*parseLocaltimeFp[]) (char* timestr, int64_t* time, int32_t timePrec) = {
+ parseLocaltime,
+ parseLocaltimeWithDst
+};
int32_t taosGetTimestampSec() { return (int32_t)time(NULL); }
-int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec) {
+int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight) {
/* parse datatime string in with tz */
if (strnchr(timestr, 'T', len, false) != NULL) {
return parseTimeWithTz(timestr, time, timePrec);
} else {
- return parseLocaltime(timestr, time, timePrec);
+ return (*parseLocaltimeFp[daylight])(timestr, time, timePrec);
}
}
@@ -304,9 +309,6 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
return -1;
}
- /* mktime will be affected by TZ, set by using taos_options */
- //int64_t seconds = mktime(&tm);
- //int64_t seconds = (int64_t)user_mktime(&tm);
int64_t seconds = user_mktime64(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
int64_t fraction = 0;
@@ -324,6 +326,32 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
return 0;
}
+int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
+ *time = 0;
+ struct tm tm = {0};
+ tm.tm_isdst = -1;
+
+ char* str = strptime(timestr, "%Y-%m-%d %H:%M:%S", &tm);
+ if (str == NULL) {
+ return -1;
+ }
+
+ /* mktime will be affected by TZ, set by using taos_options */
+ int64_t seconds = mktime(&tm);
+
+ int64_t fraction = 0;
+
+ if (*str == '.') {
+ /* parse the second fraction part */
+ if ((fraction = parseFraction(str + 1, &str, timePrec)) < 0) {
+ return -1;
+ }
+ }
+
+ int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
+ *time = factor * seconds + fraction;
+ return 0;
+}
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
*result = val;
diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c
index 68db574d82..cdb2c6c0be 100644
--- a/src/util/src/ttimer.c
+++ b/src/util/src/ttimer.c
@@ -552,8 +552,7 @@ void* taosTmrInit(int maxNumOfTmrs, int resolution, int longest, const char* lab
return NULL;
}
- strncpy(ctrl->label, label, sizeof(ctrl->label));
- ctrl->label[sizeof(ctrl->label) - 1] = 0;
+ tstrncpy(ctrl->label, label, sizeof(ctrl->label));
tmrTrace("%s timer controller is initialized, number of timer controllers: %d.", label, numOfTmrCtrl);
return ctrl;
}
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index c524a61591..9209117415 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -582,13 +582,13 @@ bool taosGetVersionNumber(char *versionStr, int *versionNubmer) {
}
int taosCheckVersion(char *input_client_version, char *input_server_version, int comparedSegments) {
- char client_version[64] = {0};
- char server_version[64] = {0};
+ char client_version[TSDB_VERSION_LEN] = {0};
+ char server_version[TSDB_VERSION_LEN] = {0};
int clientVersionNumber[4] = {0};
int serverVersionNumber[4] = {0};
- strcpy(client_version, input_client_version);
- strcpy(server_version, input_server_version);
+ tstrncpy(client_version, input_client_version, sizeof(client_version));
+ tstrncpy(server_version, input_server_version, sizeof(server_version));
if (!taosGetVersionNumber(client_version, clientVersionNumber)) {
uError("invalid client version:%s", client_version);
diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp
index b636bcfac0..5762d5700b 100644
--- a/src/util/tests/cacheTest.cpp
+++ b/src/util/tests/cacheTest.cpp
@@ -19,8 +19,7 @@ int32_t tsMaxMeterConnections = 200;
// test cache
TEST(testCase, client_cache_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- void* tscTmr = taosTmrInit (tsMaxMgmtConnections*2, 200, 6000, "TSC");
- SCacheObj* tscCacheHandle = taosCacheInit(tscTmr, REFRESH_TIME_IN_SEC);
+ SCacheObj* tscCacheHandle = taosCacheInit(REFRESH_TIME_IN_SEC);
const char* key1 = "test1";
char data1[] = "test11";
@@ -106,9 +105,7 @@ TEST(testCase, client_cache_test) {
TEST(testCase, cache_resize_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- void* tscTmr = taosTmrInit (1000*2, 200, 6000, "TSC");
-
- auto* pCache = taosCacheInit(tscTmr, REFRESH_TIME_IN_SEC);
+ auto* pCache = taosCacheInit(REFRESH_TIME_IN_SEC);
char key[256] = {0};
char data[1024] = "abcdefghijk";
diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h
index 7c95e81cf5..ab74e329e6 100644
--- a/src/vnode/inc/vnodeInt.h
+++ b/src/vnode/inc/vnodeInt.h
@@ -51,7 +51,7 @@ typedef struct {
SSyncCfg syncCfg;
SWalCfg walCfg;
char *rootDir;
- char db[TSDB_DB_NAME_LEN + 1];
+ char db[TSDB_DB_NAME_LEN];
} SVnodeObj;
int vnodeWriteToQueue(void *param, void *pHead, int type);
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 9ec982b1de..0882ee983d 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -321,6 +321,22 @@ void vnodeRelease(void *pVnodeRaw) {
return;
}
+ if (pVnode->tsdb)
+ tsdbCloseRepo(pVnode->tsdb, 1);
+ pVnode->tsdb = NULL;
+
+ if (pVnode->wal)
+ walClose(pVnode->wal);
+ pVnode->wal = NULL;
+
+ if (pVnode->wqueue)
+ dnodeFreeVnodeWqueue(pVnode->wqueue);
+ pVnode->wqueue = NULL;
+
+ if (pVnode->rqueue)
+ dnodeFreeVnodeRqueue(pVnode->rqueue);
+ pVnode->rqueue = NULL;
+
tfree(pVnode->rootDir);
if (pVnode->status == TAOS_VN_STATUS_DELETING) {
@@ -381,16 +397,18 @@ void *vnodeGetWal(void *pVnode) {
static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
if (pVnode->status == TAOS_VN_STATUS_DELETING) return;
if (pStatus->openVnodes >= TSDB_MAX_VNODES) return;
+ int64_t totalStorage = 0, compStorage = 0, pointsWritten = 0;
+ tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage);
SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++];
pLoad->vgId = htonl(pVnode->vgId);
pLoad->cfgVersion = htonl(pVnode->cfgVersion);
- pLoad->totalStorage = htobe64(pLoad->totalStorage);
- pLoad->compStorage = htobe64(pLoad->compStorage);
- pLoad->pointsWritten = htobe64(pLoad->pointsWritten);
+ pLoad->totalStorage = htobe64(totalStorage);
+ pLoad->compStorage = htobe64(compStorage);
+ pLoad->pointsWritten = htobe64(pointsWritten);
pLoad->status = pVnode->status;
pLoad->role = pVnode->role;
- pLoad->replica = pVnode->syncCfg.replica;
+ pLoad->replica = pVnode->syncCfg.replica;
}
void vnodeBuildStatusMsg(void *param) {
@@ -409,33 +427,21 @@ void vnodeBuildStatusMsg(void *param) {
}
static void vnodeCleanUp(SVnodeObj *pVnode) {
+ // remove from hash, so new messages won't be consumed
taosHashRemove(tsDnodeVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t));
+ // stop replication module
if (pVnode->sync) {
syncStop(pVnode->sync);
pVnode->sync = NULL;
}
- if (pVnode->tsdb)
- tsdbCloseRepo(pVnode->tsdb, 1);
- pVnode->tsdb = NULL;
-
- if (pVnode->wal)
- walClose(pVnode->wal);
- pVnode->wal = NULL;
-
+ // stop continuous query
if (pVnode->cq)
cqClose(pVnode->cq);
pVnode->cq = NULL;
- if (pVnode->wqueue)
- dnodeFreeVnodeWqueue(pVnode->wqueue);
- pVnode->wqueue = NULL;
-
- if (pVnode->rqueue)
- dnodeFreeVnodeRqueue(pVnode->rqueue);
- pVnode->rqueue = NULL;
-
+ // release local resources only after cutting off outside connections
vnodeRelease(pVnode);
}
@@ -551,6 +557,7 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
+ fflush(fp);
fclose(fp);
free(content);
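The added fflush() pushes the stdio buffer into the kernel before fclose(); if the vnode config also has to survive a crash or power loss, an fsync() on the underlying descriptor would be the stronger guarantee (not part of this patch). The difference, in the shape of the code above:

fwrite(content, 1, len, fp);
fflush(fp);               /* stdio buffer -> kernel page cache (what this patch adds) */
/* fsync(fileno(fp)); */  /* kernel page cache -> disk (stricter; not done here) */
fclose(fp);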
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index f198c2ffe4..1e770d8d27 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -27,17 +27,18 @@
#include "vnodeLog.h"
#include "query.h"
-static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, int32_t contLen, SRspRet *pRet);
-static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet);
-static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet);
+static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SReadMsg *pReadMsg);
+static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg);
+static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg);
void vnodeInitReadFp(void) {
vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg;
vnodeProcessReadMsgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessFetchMsg;
}
-int32_t vnodeProcessRead(void *param, int msgType, void *pCont, int32_t contLen, SRspRet *ret) {
+int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
SVnodeObj *pVnode = (SVnodeObj *)param;
+ int msgType = pReadMsg->rpcMsg.msgType;
if (vnodeProcessReadMsgFp[msgType] == NULL) {
vTrace("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[msgType]);
@@ -55,16 +56,46 @@ int32_t vnodeProcessRead(void *param, int msgType, void *pCont, int32_t contLen,
return TSDB_CODE_RPC_NOT_READY;
}
- return (*vnodeProcessReadMsgFp[msgType])(pVnode, pCont, contLen, ret);
+ return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg);
}
-static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet) {
+// notify the connection (handle) that the current qhandle has been created; if the connection
+// from the client is broken, the query needs to be killed immediately.
+static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
+ SRetrieveTableMsg* killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
+ killQueryMsg->qhandle = htobe64((uint64_t) qhandle);
+ killQueryMsg->free = htons(1);
+ killQueryMsg->header.vgId = htonl(vgId);
+ killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));
+
+ vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle);
+ return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
+}
+
+static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
+ void * pCont = pReadMsg->pCont;
+ int32_t contLen = pReadMsg->contLen;
+ SRspRet *pRet = &pReadMsg->rspRet;
+
SQueryTableMsg* pQueryTableMsg = (SQueryTableMsg*) pCont;
memset(pRet, 0, sizeof(SRspRet));
- int32_t code = TSDB_CODE_SUCCESS;
+ // qHandle needs to be freed correctly
+ if (pReadMsg->rpcMsg.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ SRetrieveTableMsg* killQueryMsg = (SRetrieveTableMsg*) pReadMsg->pCont;
+ killQueryMsg->free = htons(killQueryMsg->free);
+ killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
+ vWarn("QInfo:%p connection %p broken, kill query", killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
+ assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
+
+ qKillQuery((qinfo_t) killQueryMsg->qhandle);
+ return TSDB_CODE_TSC_QUERY_CANCELLED; // todo change the error code
+ }
+
+ int32_t code = TSDB_CODE_SUCCESS;
qinfo_t pQInfo = NULL;
+
if (contLen != 0) {
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo);
@@ -74,7 +105,19 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t cont
pRet->len = sizeof(SQueryTableRsp);
pRet->rsp = pRsp;
-
+
+ // current connect is broken
+ if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId) != TSDB_CODE_SUCCESS) {
+ vError("vgId:%d, QInfo:%p, dnode query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
+ pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+
+ // NOTE: there are two refcounts, so the query needs to be killed twice; todo: refactor
+ qKillQuery(pQInfo);
+ qKillQuery(pQInfo);
+
+ return pRsp->code;
+ }
+
vTrace("vgId:%d, QInfo:%p, dnode query msg disposed", pVnode->vgId, pQInfo);
} else {
assert(pCont != NULL);
@@ -91,13 +134,34 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t cont
return code;
}
-static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet) {
+static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
+ void * pCont = pReadMsg->pCont;
+ SRspRet *pRet = &pReadMsg->rspRet;
+
SRetrieveTableMsg *pRetrieve = pCont;
void *pQInfo = (void*) htobe64(pRetrieve->qhandle);
+ pRetrieve->free = htons(pRetrieve->free);
+
memset(pRet, 0, sizeof(SRspRet));
+ if (pRetrieve->free == 1) {
+ vTrace("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
+ int32_t ret = qKillQuery(pQInfo);
+
+ pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
+ pRet->len = sizeof(SRetrieveTableRsp);
+
+ memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
+ SRetrieveTableRsp* pRsp = pRet->rsp;
+ pRsp->numOfRows = 0;
+ pRsp->completed = true;
+ pRsp->useconds = 0;
+
+ return ret;
+ }
+
vTrace("vgId:%d, QInfo:%p, retrieve msg is received", pVnode->vgId, pQInfo);
-
+
int32_t code = qRetrieveQueryResultInfo(pQInfo);
if (code != TSDB_CODE_SUCCESS) {
//TODO
@@ -110,8 +174,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, void *pCont, int32_t cont
if (qHasMoreResultsToRetrieve(pQInfo)) {
pRet->qhandle = pQInfo;
code = TSDB_CODE_VND_ACTION_NEED_REPROCESSED;
- } else {
- // no further execution invoked, release the ref to vnode
+ } else { // no further execution invoked, release the ref to vnode
qDestroyQueryInfo(pQInfo);
vnodeRelease(pVnode);
}
diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c
index 09225984c4..b05b0db4c9 100644
--- a/src/wal/src/walMain.c
+++ b/src/wal/src/walMain.c
@@ -44,7 +44,7 @@ typedef struct {
uint32_t id; // increase continuously
int num; // number of wal files
char path[TSDB_FILENAME_LEN];
- char name[TSDB_FILENAME_LEN];
+ char name[TSDB_FILENAME_LEN+16];
pthread_mutex_t mutex;
} SWal;
@@ -108,7 +108,7 @@ void walClose(void *handle) {
if (pWal->keep == 0) {
// remove all files in the directory
for (int i=0; i<pWal->num; ++i) {
- sprintf(pWal->name, "%s/%s%d", pWal->path, walPrefix, pWal->id-i);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", pWal->path, walPrefix, pWal->id-i);
if (remove(pWal->name) <0) {
wError("wal:%s, failed to remove", pWal->name);
} else {
@@ -140,7 +140,7 @@ int walRenew(void *handle) {
pWal->num++;
- sprintf(pWal->name, "%s/%s%d", pWal->path, walPrefix, pWal->id);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", pWal->path, walPrefix, pWal->id);
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
@@ -152,7 +152,7 @@ int walRenew(void *handle) {
if (pWal->num > pWal->max) {
// remove the oldest wal file
char name[TSDB_FILENAME_LEN * 3];
- sprintf(name, "%s/%s%d", pWal->path, walPrefix, pWal->id - pWal->max);
+ snprintf(name, sizeof(name), "%s/%s%d", pWal->path, walPrefix, pWal->id - pWal->max);
if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno));
} else {
@@ -214,7 +214,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
int plen = strlen(walPrefix);
char opath[TSDB_FILENAME_LEN+5];
- int slen = sprintf(opath, "%s", pWal->path);
+ int slen = snprintf(opath, sizeof(opath), "%s", pWal->path);
if ( pWal->keep == 0)
strcpy(opath+slen, "/old");
@@ -245,7 +245,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
wTrace("wal:%s, %d files will be restored", opath, count);
for (index = minId; index<=maxId; ++index) {
- sprintf(pWal->name, "%s/%s%d", opath, walPrefix, index);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, index);
terrno = walRestoreWalFile(pWal, pVnode, writeFp);
if (terrno < 0) break;
}
@@ -264,7 +264,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
// open the existing WAL file in append mode
pWal->num = count;
pWal->id = maxId;
- sprintf(pWal->name, "%s/%s%d", opath, walPrefix, maxId);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, maxId);
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno));
@@ -361,7 +361,7 @@ int walHandleExistingFiles(const char *path) {
char nname[TSDB_FILENAME_LEN * 3];
char opath[TSDB_FILENAME_LEN];
- sprintf(opath, "%s/old", path);
+ snprintf(opath, sizeof(opath), "%s/old", path);
struct dirent *ent;
DIR *dir = opendir(path);
@@ -377,8 +377,8 @@ int walHandleExistingFiles(const char *path) {
int count = 0;
while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
- sprintf(oname, "%s/%s", path, ent->d_name);
- sprintf(nname, "%s/old/%s", path, ent->d_name);
+ snprintf(oname, sizeof(oname), "%s/%s", path, ent->d_name);
+ snprintf(nname, sizeof(nname), "%s/old/%s", path, ent->d_name);
if (access(opath, F_OK) != 0) {
if (mkdir(opath, 0755) != 0) {
wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno));
@@ -416,7 +416,7 @@ static int walRemoveWalFiles(const char *path) {
while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
- sprintf(name, "%s/%s", path, ent->d_name);
+ snprintf(name, sizeof(name), "%s/%s", path, ent->d_name);
if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
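All of the path formatting in walMain.c now goes through snprintf with the destination size, so an oversized path is truncated instead of overflowing the buffer. A generic sketch of the pattern, including the truncation check that the patch itself does not add (names are illustrative):

char name[TSDB_FILENAME_LEN + 16];
int n = snprintf(name, sizeof(name), "%s/%s%u", path, walPrefix, id);
if (n < 0 || n >= (int)sizeof(name)) {
  /* the formatted path did not fit; handle truncation instead of writing past the buffer */
}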
diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c
index 55a19eb5f9..34c785a271 100644
--- a/tests/examples/c/demo.c
+++ b/tests/examples/c/demo.c
@@ -30,7 +30,7 @@ static int32_t doQuery(TAOS* taos, const char* sql) {
TAOS_RES* res = taos_query(taos, sql);
if (taos_errno(res) != 0) {
- printf("failed to execute query, reason:%s\n", taos_errstr(res));
+ printf("failed to execute query, reason:%s\n", taos_errstr(taos));
return -1;
}
@@ -77,7 +77,7 @@ static __attribute__((unused)) void multiThreadTest(int32_t numOfThreads, void*
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
- pthread_t* threadId = malloc(sizeof(pthread_t)*numOfThreads);
+ pthread_t* threadId = (pthread_t*)malloc(sizeof(pthread_t)*(uint32_t)numOfThreads);
for (int i = 0; i < numOfThreads; ++i) {
pthread_create(&threadId[i], NULL, oneLoader, conn);
@@ -115,15 +115,15 @@ int main(int argc, char *argv[]) {
printf("success to connect to server\n");
// doQuery(taos, "select c1,count(*) from group_db0.group_mt0 where c1<8 group by c1");
- doQuery(taos, "select * from test.m1");
+// doQuery(taos, "select * from test.m1");
// multiThreadTest(1, taos);
// doQuery(taos, "select tbname from test.m1");
// doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 and tbname in ('lm2_tb0') interval(1s) group by t1");
// doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 and tbname in ('lm2_tb0', 'lm2_tb1', 'lm2_tb2') interval(1s)");
-// for(int32_t i = 0; i < 100000; ++i) {
-// doQuery(taos, "insert into t1 values(now, 2)");
-// }
+ for(int32_t i = 0; i < 200; ++i) {
+ doQuery(taos, "select * from lm2_db0.lm2_stb0");
+ }
// doQuery(taos, "create table t1(ts timestamp, k binary(12), f nchar(2))");
taos_close(taos);
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 99dc658d71..58ab8b99b4 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -134,3 +134,11 @@ python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
python3 ./test.py -f query/queryError.py
+python3 ./test.py -f query/filterAllIntTypes.py
+python3 ./test.py -f query/filterFloatAndDouble.py
+python3 ./test.py -f query/filterOtherTypes.py
+python3 ./test.py -f query/querySort.py
+
+#stream
+python3 ./test.py -f stream/stream1.py
+python3 ./test.py -f stream/stream2.py
diff --git a/tests/pytest/query/filterAllIntTypes.py b/tests/pytest/query/filterAllIntTypes.py
new file mode 100644
index 0000000000..32e635d6da
--- /dev/null
+++ b/tests/pytest/query/filterAllIntTypes.py
@@ -0,0 +1,117 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.powers = [7, 15, 31, 63]
+ self.types = ["tinyint", "smallint", "int", "bigint"]
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ for i in range(len(self.powers)):
+ curType = self.types[i]
+ print("======= Verify filter for %s type =========" % (curType))
+ tdLog.debug(
+ "create table st%s(ts timestamp, num %s) tags(id %s)" % (curType, curType, curType))
+ tdSql.execute(
+ "create table st%s(ts timestamp, num %s) tags(id %s)" % (curType, curType, curType))
+
+ # create 10 tables and insert 10 rows into each table
+ for j in range(self.rowNum):
+ tdSql.execute("create table st%s%d using st%s tags(%d)" % (curType, j + 1, curType, j + 1))
+ for k in range(self.rowNum):
+ tdSql.execute("insert into st%s%d values(%d, %d)" % (curType, j + 1, self.ts + k + 1, j * 10 + k + 1))
+
+ tdSql.error("insert into st%s10 values(%d, %d)" % (curType, self.ts + 11, pow(2, self.powers[i])))
+ tdSql.execute("insert into st%s10 values(%d, %d)" % (curType, self.ts + 12, pow(2, self.powers[i]) - 1))
+ tdSql.error("insert into st%s10 values(%d, %d)" % (curType, self.ts + 13, pow(-2, self.powers[i])))
+ tdSql.execute("insert into st%s10 values(%d, %d)" % (curType, self.ts + 14, pow(-2, self.powers[i]) + 1))
+
+ # > for int type on column
+ tdSql.query("select * from st%s where num > 50" % curType)
+ tdSql.checkRows(51)
+
+ # >= for int type on column
+ tdSql.query("select * from st%s where num >= 50" % curType)
+ tdSql.checkRows(52)
+
+ # = for int type on column
+ tdSql.query("select * from st%s where num = 50" % curType)
+ tdSql.checkRows(1)
+
+ # < for int type on column
+ tdSql.query("select * from st%s where num < 50" % curType)
+ tdSql.checkRows(50)
+
+ # <= for int type on column
+ tdSql.query("select * from st%s where num <= 50" % curType)
+ tdSql.checkRows(51)
+
+ # <> for int type on column
+ tdSql.query("select * from st%s where num <> 50" % curType)
+ tdSql.checkRows(101)
+
+ # != for int type on column
+ tdSql.query("select * from st%s where num != 50" % curType)
+ tdSql.checkRows(101)
+
+ # > for int type on tag
+ tdSql.query("select * from st%s where id > 5" % curType)
+ tdSql.checkRows(52)
+
+ # >= for int type on tag
+ tdSql.query("select * from st%s where id >= 5" % curType)
+ tdSql.checkRows(62)
+
+ # = for int type on tag
+ tdSql.query("select * from st%s where id = 5" % curType)
+ tdSql.checkRows(10)
+
+ # < for int type on tag
+ tdSql.query("select * from st%s where id < 5" % curType)
+ tdSql.checkRows(40)
+
+ # <= for int type on tag
+ tdSql.query("select * from st%s where id <= 5" % curType)
+ tdSql.checkRows(50)
+
+ # <> for int type on tag
+ tdSql.query("select * from st%s where id <> 5" % curType)
+ tdSql.checkRows(92)
+
+ # != for int type on tag
+ tdSql.query("select * from st%s where id != 5" % curType)
+ tdSql.checkRows(92)
+
+ print("======= Verify filter for %s type finished =========" % curType)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/filterFloatAndDouble.py b/tests/pytest/query/filterFloatAndDouble.py
new file mode 100644
index 0000000000..bea41be11c
--- /dev/null
+++ b/tests/pytest/query/filterFloatAndDouble.py
@@ -0,0 +1,160 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ print("======= Verify filter for float and double type =========")
+ tdLog.debug(
+ "create table st(ts timestamp, num float, speed double) tags(tagcol1 float, tagcol2 double)")
+ tdSql.execute(
+ "create table st(ts timestamp, num float, speed double) tags(tagcol1 float, tagcol2 double)")
+
+ for j in range(self.rowNum):
+ tdSql.execute(
+ "insert into st1 using st tags(1.1, 2.3) values(%d, %f, %f)" % (self.ts + j + 1, 1.1 * (j + 1), 2.3 * (j + 1)))
+
+ # > for float type on column
+ tdSql.query("select * from st where num > 5.5")
+ tdSql.checkRows(5)
+
+ # >= for float type on column
+ tdSql.query("select * from st where num >= 5.5")
+ tdSql.checkRows(6)
+
+ # = for float type on column
+ tdSql.query("select * from st where num = 5.5")
+ tdSql.checkRows(1)
+
+ # <> for float type on column
+ tdSql.query("select * from st where num <> 5.5")
+ tdSql.checkRows(9)
+
+ # != for float type on column
+ tdSql.query("select * from st where num != 5.5")
+ tdSql.checkRows(9)
+
+ # <= for float type on column
+ tdSql.query("select * from st where num <= 5.5")
+ tdSql.checkRows(5)
+
+ # < for float type on column
+ tdSql.query("select * from st where num < 5.5")
+ tdSql.checkRows(4)
+
+ # > for float type on tag
+ tdSql.query("select * from st where tagcol1 > 1.1")
+ tdSql.checkRows(0)
+
+ # >= for float type on tag
+ tdSql.query("select * from st where tagcol1 >= 1.1")
+ tdSql.checkRows(10)
+
+ # = for float type on tag
+ tdSql.query("select * from st where tagcol1 = 1.1")
+ tdSql.checkRows(10)
+
+ # <> for float type on tag
+ tdSql.query("select * from st where tagcol1 <> 1.1")
+ tdSql.checkRows(0)
+
+ # != for float type on tag
+ tdSql.query("select * from st where tagcol1 != 1.1")
+ tdSql.checkRows(0)
+
+ # <= for float type on tag
+ tdSql.query("select * from st where tagcol1 <= 1.1")
+ tdSql.checkRows(10)
+
+ # < for float type on tag
+ tdSql.query("select * from st where tagcol1 < 1.1")
+ tdSql.checkRows(0)
+
+ # > for double type on column
+ tdSql.query("select * from st where speed > 11.5")
+ tdSql.checkRows(5)
+
+ # >= for double type on column
+ tdSql.query("select * from st where speed >= 11.5")
+ tdSql.checkRows(6)
+
+ # = for double type on column
+ tdSql.query("select * from st where speed = 11.5")
+ tdSql.checkRows(1)
+
+ # <> for double type on column
+ tdSql.query("select * from st where speed <> 11.5")
+ tdSql.checkRows(9)
+
+ # != for double type on column
+ tdSql.query("select * from st where speed != 11.5")
+ tdSql.checkRows(9)
+
+ # <= for double type on column
+ tdSql.query("select * from st where speed <= 11.5")
+ tdSql.checkRows(5)
+
+ # < for double type on column
+ tdSql.query("select * from st where speed < 11.5")
+ tdSql.checkRows(4)
+
+ # > for double type on tag
+ tdSql.query("select * from st where tagcol2 > 2.3")
+ tdSql.checkRows(0)
+
+ # >= for double type on tag
+ tdSql.query("select * from st where tagcol2 >= 2.3")
+ tdSql.checkRows(10)
+
+ # = for double type on tag
+ tdSql.query("select * from st where tagcol2 = 2.3")
+ tdSql.checkRows(10)
+
+ # <> for double type on tag
+ tdSql.query("select * from st where tagcol2 <> 2.3")
+ tdSql.checkRows(0)
+
+ # != for double type on tag
+ tdSql.query("select * from st where tagcol2 != 2.3")
+ tdSql.checkRows(0)
+
+ # <= for double type on tag
+ tdSql.query("select * from st where tagcol2 <= 2.3")
+ tdSql.checkRows(10)
+
+ # < for double type on tag
+ tdSql.query("select * from st where tagcol2 < 2.3")
+ tdSql.checkRows(0)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/filterOtherTypes.py b/tests/pytest/query/filterOtherTypes.py
new file mode 100644
index 0000000000..f09ac596c7
--- /dev/null
+++ b/tests/pytest/query/filterOtherTypes.py
@@ -0,0 +1,362 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ print("======= Verify filter for bool, nchar and binary type =========")
+ tdLog.debug(
+ "create table st(ts timestamp, tbcol1 bool, tbcol2 nchar(10), tbcol3 binary(20)) tags(tagcol1 bool, tagcol2 nchar(10), tagcol3 binary(10))")
+ tdSql.execute(
+ "create table st(ts timestamp, tbcol1 bool, tbcol2 nchar(10), tbcol3 binary(20)) tags(tagcol1 bool, tagcol2 nchar(10), tagcol3 binary(10))")
+
+ tdSql.execute("create table st1 using st tags(true, 'table1', '水表')")
+ for i in range(1, 6):
+ tdSql.execute("insert into st1 values(%d, %d, 'taosdata%d', '涛思数据%d')" % (self.ts + i, i % 2, i, i))
+
+ tdSql.execute("create table st2 using st tags(false, 'table2', '电表')")
+ for i in range(6, 11):
+ tdSql.execute("insert into st2 values(%d, %d, 'taosdata%d', '涛思数据%d')" % (self.ts + i, i % 2, i, i))
+
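+ # Data layout for the assertions below: st1 (tags true, 'table1', '水表') and
+ # st2 (tags false, 'table2', '电表') each hold five rows, so the super table st
+ # exposes ten rows in total.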
+ # =============Verify stable columns====================
+ # > for bool type on column
+ tdSql.error("select * from st where tbcol1 > false")
+
+ # >= for bool type on column
+ tdSql.error("select * from st where tbcol1 >= false")
+
+ # = for bool type on column
+ tdSql.query("select * from st where tbcol1 = false")
+ tdSql.checkRows(5)
+
+ # <> for bool type on column
+ tdSql.query("select * from st where tbcol1 <> true")
+ tdSql.checkRows(5)
+
+ # != for bool type on column
+ tdSql.query("select * from st where tbcol1 != true")
+ tdSql.checkRows(5)
+
+ # < for bool type on column
+ tdSql.error("select * from st where tbcol1 < true")
+
+ # <= for bool type on column
+ tdSql.error("select * from st where tbcol1 <= true")
+
+ # % for bool type on column
+ tdSql.error("select * from st where tbcol1 like '%'")
+
+ # _ for bool type on column
+ tdSql.error("select * from st where tbcol1 like '____'")
+
+ # > for nchar type on column
+ tdSql.error("select * from st where tbcol2 > 'taosdata'")
+
+ # >= for nchar type on column
+ tdSql.error("select * from st where tbcol2 >= 'taosdata'")
+
+ # = for nchar type on column
+ tdSql.query("select * from st where tbcol2 = 'taosdata1'")
+ tdSql.checkRows(1)
+
+ # <> for nchar type on column
+ tdSql.query("select * from st where tbcol2 <> 'taosdata1'")
+ tdSql.checkRows(9)
+
+ # != for nchar type on column
+ tdSql.query("select * from st where tbcol2 != 'taosdata1'")
+ tdSql.checkRows(9)
+
+ # < for nchar type on column
+ tdSql.error("select * from st where tbcol2 < 'taodata'")
+
+ # <= for nchar type on column
+ tdSql.error("select * from st where tbcol2 <= 'taodata'")
+
+ # % for nchar type on column case 1
+ tdSql.query("select * from st where tbcol2 like '%'")
+ tdSql.checkRows(10)
+
+ # % for nchar type on column case 2
+ tdSql.query("select * from st where tbcol2 like 'a%'")
+ tdSql.checkRows(0)
+
+ # % for nchar type on column case 3
+ tdSql.query("select * from st where tbcol2 like 't%_'")
+ tdSql.checkRows(10)
+
+ # % for nchar type on column case 4
+ tdSql.query("select * from st where tbcol2 like '%1'")
+ # tdSql.checkRows(2)
+
+ # _ for nchar type on column case 1
+ tdSql.query("select * from st where tbcol2 like '____________'")
+ tdSql.checkRows(0)
+
+ # _ for nchar type on column case 2
+ tdSql.query("select * from st where tbcol2 like '__________'")
+ tdSql.checkRows(1)
+
+ # _ for nchar type on column case 3
+ tdSql.query("select * from st where tbcol2 like '_________'")
+ tdSql.checkRows(9)
+
+ # _ for nchar type on column case 4
+ tdSql.query("select * from st where tbcol2 like 't________'")
+ tdSql.checkRows(9)
+
+ # _ for nchar type on column case 5
+ tdSql.query("select * from st where tbcol2 like '%________'")
+ tdSql.checkRows(10)
+
+ # > for binary type on column
+ tdSql.error("select * from st where tbcol3 > '涛思数据'")
+
+ # >= for binary type on column
+ tdSql.error("select * from st where tbcol3 >= '涛思数据'")
+
+ # = for binary type on column
+ tdSql.query("select * from st where tbcol3 = '涛思数据1'")
+ tdSql.checkRows(1)
+
+ # <> for binary type on column
+ tdSql.query("select * from st where tbcol3 <> '涛思数据1'")
+ tdSql.checkRows(9)
+
+ # != for binary type on column
+ tdSql.query("select * from st where tbcol3 != '涛思数据1'")
+ tdSql.checkRows(9)
+
+ # < for binary type on column
+ tdSql.error("select * from st where tbcol3 < '涛思数据'")
+
+ # <= for binary type on column
+ tdSql.error("select * from st where tbcol3 <= '涛思数据'")
+
+ # % for binary type on column case 1
+ tdSql.query("select * from st where tbcol3 like '%'")
+ tdSql.checkRows(10)
+
+ # % for binary type on column case 2
+ tdSql.query("select * from st where tbcol3 like '陶%'")
+ tdSql.checkRows(0)
+
+ # % for binary type on column case 3
+ tdSql.query("select * from st where tbcol3 like '涛%_'")
+ tdSql.checkRows(10)
+
+ # % for binary type on column case 4
+ tdSql.query("select * from st where tbcol3 like '%1'")
+ tdSql.checkRows(1)
+
+ # _ for binary type on column case 1
+ tdSql.query("select * from st where tbcol3 like '_______'")
+ tdSql.checkRows(0)
+
+ # _ for binary type on column case 2
+ tdSql.query("select * from st where tbcol3 like '______'")
+ tdSql.checkRows(1)
+
+ # _ for binary type on column case 3
+ tdSql.query("select * from st where tbcol3 like '_____'")
+ tdSql.checkRows(9)
+
+ # _ for binary type on column case 4
+ tdSql.query("select * from st where tbcol3 like '____'")
+ tdSql.checkRows(0)
+
+ # _ for binary type on column case 5
+ tdSql.query("select * from st where tbcol3 like 't____'")
+ tdSql.checkRows(0)
+
+ # =============Verify stable tags====================
+ # > for bool type on tag
+ tdSql.error("select * from st where tagcol1 > false")
+
+ # >= for bool type on tag
+ tdSql.error("select * from st where tagcol1 >= false")
+
+ # = for bool type on tag
+ tdSql.query("select * from st where tagcol1 = false")
+ tdSql.checkRows(5)
+
+ # <> for bool type on tag
+ tdSql.query("select * from st where tagcol1 <> true")
+ tdSql.checkRows(5)
+
+ # != for bool type on tag
+ tdSql.query("select * from st where tagcol1 != true")
+ tdSql.checkRows(5)
+
+ # < for bool type on tag
+ tdSql.error("select * from st where tagcol1 < true")
+
+ # <= for bool type on tag
+ tdSql.error("select * from st where tagcol1 <= true")
+
+ # % for bool type on tag
+ tdSql.error("select * from st where tagcol1 like '%'")
+
+ # _ for bool type on tag
+ tdSql.error("select * from st where tagcol1 like '____'")
+
+ # > for nchar type on tag
+ tdSql.error("select * from st where tagcol2 > 'table'")
+
+ # >= for nchar type on tag
+ tdSql.error("select * from st where tagcol2 >= 'table'")
+
+ # = for nchar type on tag
+ tdSql.query("select * from st where tagcol2 = 'table1'")
+ tdSql.checkRows(5)
+
+ # <> for nchar type on tag
+ tdSql.query("select * from st where tagcol2 <> 'table1'")
+ tdSql.checkRows(5)
+
+ # != for nchar type on tag
+ tdSql.query("select * from st where tagcol2 != 'table'")
+ tdSql.checkRows(10)
+
+ # < for nchar type on tag
+ tdSql.error("select * from st where tagcol2 < 'table'")
+
+ # <= for nchar type on tag
+ tdSql.error("select * from st where tagcol2 <= 'table'")
+
+ # % for nchar type on tag case 1
+ tdSql.query("select * from st where tagcol2 like '%'")
+ tdSql.checkRows(10)
+
+ # % for nchar type on tag case 2
+ tdSql.query("select * from st where tagcol2 like 'a%'")
+ tdSql.checkRows(0)
+
+ # % for nchar type on tag case 3
+ tdSql.query("select * from st where tagcol2 like 't%_'")
+ tdSql.checkRows(10)
+
+ # % for nchar type on tag case 4
+ tdSql.query("select * from st where tagcol2 like '%1'")
+ tdSql.checkRows(5)
+
+ # _ for nchar type on tag case 1
+ tdSql.query("select * from st where tagcol2 like '_______'")
+ tdSql.checkRows(0)
+
+ # _ for nchar type on tag case 2
+ tdSql.query("select * from st where tagcol2 like '______'")
+ tdSql.checkRows(10)
+
+ # _ for nchar type on tag case 3
+ tdSql.query("select * from st where tagcol2 like 't_____'")
+ tdSql.checkRows(10)
+
+ # _ for nchar type on tag case 4
+ tdSql.query("select * from st where tagcol2 like 's________'")
+ tdSql.checkRows(0)
+
+ # _ for nchar type on tag case 5
+ tdSql.query("select * from st where tagcol2 like '%__'")
+ tdSql.checkRows(10)
+
+ # > for binary type on tag
+ tdSql.error("select * from st where tagcol3 > '表'")
+
+ # >= for binary type on tag
+ tdSql.error("select * from st where tagcol3 >= '表'")
+
+ # = for binary type on tag
+ tdSql.query("select * from st where tagcol3 = '水表'")
+ tdSql.checkRows(5)
+
+ # <> for binary type on tag
+ tdSql.query("select * from st where tagcol3 <> '水表'")
+ tdSql.checkRows(5)
+
+ # != for binary type on tag
+ tdSql.query("select * from st where tagcol3 != '水表'")
+ tdSql.checkRows(5)
+
+ # < for binary type on tag
+ tdSql.error("select * from st where tagcol3 < '水表'")
+
+ # <= for binary type on tag
+ tdSql.error("select * from st where tagcol3 <= '水表'")
+
+ # % for binary type on tag case 1
+ tdSql.query("select * from st where tagcol3 like '%'")
+ tdSql.checkRows(10)
+
+ # % for binary type on tag case 2
+ tdSql.query("select * from st where tagcol3 like '水%'")
+ tdSql.checkRows(5)
+
+ # % for binary type on tag case 3
+ tdSql.query("select * from st where tagcol3 like '数%_'")
+ tdSql.checkRows(0)
+
+ # % for binary type on tag case 4
+ tdSql.query("select * from st where tagcol3 like '%表'")
+ tdSql.checkRows(10)
+
+ # % for binary type on tag case 5
+ tdSql.query("select * from st where tagcol3 like '%据'")
+ tdSql.checkRows(0)
+
+ # _ for binary type on tag case 1
+ tdSql.query("select * from st where tagcol3 like '__'")
+ tdSql.checkRows(10)
+
+ # _ for binary type on tag case 2
+ tdSql.query("select * from st where tagcol3 like '水_'")
+ tdSql.checkRows(5)
+
+ # _ for binary type on tag case 3
+ tdSql.query("select * from st where tagcol3 like '_表'")
+ tdSql.checkRows(10)
+
+ # _ for binary type on tag case 4
+ tdSql.query("select * from st where tagcol3 like '___'")
+ tdSql.checkRows(0)
+
+ # _ for binary type on tag case 5
+ tdSql.query("select * from st where tagcol3 like '数_'")
+ tdSql.checkRows(0)
+
+ # _ for binary type on tag case 6
+ tdSql.query("select * from st where tagcol3 like '_据'")
+ tdSql.checkRows(0)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
diff --git a/tests/pytest/query/querySort.py b/tests/pytest/query/querySort.py
new file mode 100644
index 0000000000..bb806c27bc
--- /dev/null
+++ b/tests/pytest/query/querySort.py
@@ -0,0 +1,80 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ print("======= step 1: create table and insert data =========")
+ tdLog.debug(
+ ''' create table st(ts timestamp, tbcol1 tinyint, tbcol2 smallint, tbcol3 int, tbcol4 bigint, tbcol5 float, tbcol6 double,
+ tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20)) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
+ tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20))''')
+ tdSql.execute(
+ ''' create table st(ts timestamp, tbcol1 tinyint, tbcol2 smallint, tbcol3 int, tbcol4 bigint, tbcol5 float, tbcol6 double,
+ tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20)) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
+ tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20))''')
+
+ for i in range(self.rowNum):
+ tdSql.execute("create table st%d using st tags(%d, %d, %d, %d, %f, %f, %d, 'tag%d', '标签%d')" % (i + 1, i + 1, i + 1, i + 1, i + 1, 1.1 * (i + 1),
+ 1.23 * (i + 1), (i + 1) % 2, i + 1, i + 1))
+ for j in range(self.rowNum):
+ tdSql.execute("insert into st%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" % (i + 1, self.ts + 10 * (i + 1) + j + 1,
+ j + 1, j + 1, j + 1, j + 1, 1.1 * (j + 1), 1.23 * (j + 1), (j + 1) % 2, j + 1, j + 1))
+
+
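+ # Data layout for the ordering checks below: ten child tables st1..st10 with ten
+ # rows each; timestamps are unique and increasing, and the numeric tags grow with
+ # the table index, so the expected sort order for ts and for each numeric tag is
+ # well defined.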
+ print("======= step 2: verify order for each column =========")
+ # sort for timestamp in asc order
+ tdSql.query("select * from st order by ts asc")
+ tdSql.checkColumnSorted(0, "asc")
+
+ # sort for timestamp in desc order
+ tdSql.query("select * from st order by ts desc")
+ tdSql.checkColumnSorted(0, "desc")
+
+
+ for i in range(1, 10):
+ tdSql.error("select * from st order by tbcol%d" % i)
+ tdSql.error("select * from st order by tbcol%d asc" % i)
+ tdSql.error("select * from st order by tbcol%d desc" % i)
+
+ tdSql.query("select avg(tbcol1) from st group by tagcol%d order by tagcol%d" % (i, i))
+ tdSql.checkColumnSorted(1, "")
+
+ tdSql.query("select avg(tbcol1) from st group by tagcol%d order by tagcol%d asc" % (i, i))
+ tdSql.checkColumnSorted(1, "asc")
+
+ tdSql.query("select avg(tbcol1) from st group by tagcol%d order by tagcol%d desc" % (i, i))
+ tdSql.checkColumnSorted(1, "desc")
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/random-test/random-test-multi-threading-3.py b/tests/pytest/random-test/random-test-multi-threading-3.py
index cab17c4c1a..47c4228a8f 100644
--- a/tests/pytest/random-test/random-test-multi-threading-3.py
+++ b/tests/pytest/random-test/random-test-multi-threading-3.py
@@ -127,6 +127,8 @@ class Test (Thread):
def drop_stable(self):
tdLog.info("drop_stable")
global last_stb
+ global last_tb
+ global written
if (last_stb == ""):
tdLog.info("no super table")
@@ -135,6 +137,8 @@ class Test (Thread):
tdLog.info("will drop last super table")
tdSql.execute('drop table %s' % last_stb)
last_stb = ""
+ last_tb = ""
+ written = 0
def restart_database(self):
tdLog.info("restart_database")
@@ -205,6 +209,7 @@ class Test (Thread):
global written
dnodesDir = tdDnodes.getDnodesRootDir()
+ tdDnodes.forcestop(1)
dataDir = dnodesDir + '/dnode1/data/*'
deleteCmd = 'rm -rf %s' % dataDir
os.system(deleteCmd)
@@ -261,7 +266,7 @@ class Test (Thread):
while True:
self.queryEvent.wait()
tdLog.notice("third thread")
- randQueryOp = random.randint(1, 9)
+ randQueryOp = random.randint(1, 2)
queryOp.get(randQueryOp, lambda: "ERROR")()
self.queryEvent.clear()
self.dbEvent.clear()
diff --git a/tests/pytest/random-test/random-test-multi-threading.py b/tests/pytest/random-test/random-test-multi-threading.py
index 1d8a5c3c82..65b6dcd948 100644
--- a/tests/pytest/random-test/random-test-multi-threading.py
+++ b/tests/pytest/random-test/random-test-multi-threading.py
@@ -105,12 +105,18 @@ class Test (threading.Thread):
return
else:
tdLog.info("will create stable %s" % current_stb)
+ tdLog.info(
+ 'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
+ current_stb)
tdSql.execute(
'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
current_stb)
last_stb = current_stb
current_tb = "tb%d" % int(round(time.time() * 1000))
+ tdLog.info(
+ "create table %s using %s tags (1, '表1')" %
+ (current_tb, last_stb))
tdSql.execute(
"create table %s using %s tags (1, '表1')" %
(current_tb, last_stb))
@@ -128,6 +134,8 @@ class Test (threading.Thread):
def drop_stable(self):
tdLog.info("drop_stable")
global last_stb
+ global last_tb
+ global written
if (last_stb == ""):
tdLog.info("no super table")
@@ -136,6 +144,8 @@ class Test (threading.Thread):
tdLog.info("will drop last super table")
tdSql.execute('drop table %s' % last_stb)
last_stb = ""
+ last_tb = ""
+ written = 0
def restart_database(self):
tdLog.info("restart_database")
@@ -208,12 +218,12 @@ class Test (threading.Thread):
global written
dnodesDir = tdDnodes.getDnodesRootDir()
+ tdDnodes.forcestop(1)
dataDir = dnodesDir + '/dnode1/data/*'
deleteCmd = 'rm -rf %s' % dataDir
os.system(deleteCmd)
tdDnodes.start(1)
-# tdLog.sleep(10)
tdSql.prepare()
last_tb = ""
last_stb = ""
diff --git a/tests/pytest/random-test/random-test.py b/tests/pytest/random-test/random-test.py
index 855cabdedd..5eb356960a 100644
--- a/tests/pytest/random-test/random-test.py
+++ b/tests/pytest/random-test/random-test.py
@@ -111,6 +111,8 @@ class Test:
tdLog.info("will drop last super table")
tdSql.execute('drop table %s' % self.last_stb)
self.last_stb = ""
+ self.last_tb = ""
+ self.written = 0
def query_data_from_stable(self):
tdLog.info("query_data_from_stable")
@@ -166,7 +168,8 @@ class Test:
def delete_datafiles(self):
tdLog.info("delete_datafiles")
dnodesDir = tdDnodes.getDnodesRootDir()
- dataDir = dnodesDir + '/dnode1/*'
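+ # force-stop dnode1 before wiping its data directory, presumably so the files are
+ # not held or re-created by a still-running process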
+ tdDnodes.forcestop(1)
+ dataDir = dnodesDir + '/dnode1/data/*'
deleteCmd = 'rm -rf %s' % dataDir
os.system(deleteCmd)
diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh
index e1d4c6348b..50237f27c5 100755
--- a/tests/pytest/regressiontest.sh
+++ b/tests/pytest/regressiontest.sh
@@ -36,7 +36,7 @@ python3 ./test.py -f tag_lite/binary.py
python3 ./test.py -f tag_lite/bool_binary.py
python3 ./test.py -f tag_lite/bool_int.py
python3 ./test.py -f tag_lite/bool.py
-python3 ./test.py -f tag_lite/change.py
+# python3 ./test.py -f tag_lite/change.py
python3 ./test.py -f tag_lite/column.py
# python3 ./test.py -f tag_lite/commit.py
python3 ./test.py -f tag_lite/create.py
@@ -80,8 +80,8 @@ python3 ./test.py -f import_merge/importCacheFileTO.py
python3 ./test.py -f import_merge/importCacheFileTPO.py
python3 ./test.py -f import_merge/importCacheFileT.py
python3 ./test.py -f import_merge/importDataH2.py
-# python3 ./test.py -f import_merge/importDataHO2.py
-# python3 ./test.py -f import_merge/importDataHO.py
+python3 ./test.py -f import_merge/importDataHO2.py
+python3 ./test.py -f import_merge/importDataHO.py
python3 ./test.py -f import_merge/importDataHPO.py
python3 ./test.py -f import_merge/importDataLastHO.py
python3 ./test.py -f import_merge/importDataLastHPO.py
@@ -92,7 +92,7 @@ python3 ./test.py -f import_merge/importDataLastTO.py
python3 ./test.py -f import_merge/importDataLastTPO.py
python3 ./test.py -f import_merge/importDataLastT.py
python3 ./test.py -f import_merge/importDataS.py
-# python3 ./test.py -f import_merge/importDataSub.py
+python3 ./test.py -f import_merge/importDataSub.py
python3 ./test.py -f import_merge/importDataTO.py
python3 ./test.py -f import_merge/importDataTPO.py
python3 ./test.py -f import_merge/importDataT.py
@@ -127,10 +127,18 @@ python3 ./test.py -f user/user_create.py
python3 ./test.py -f user/pass_len.py
# table
-#python3 ./test.py -f table/del_stable.py
+# python3 ./test.py -f table/del_stable.py
#query
python3 ./test.py -f query/filter.py
-# python3 ./test.py -f query/filterCombo.py
-# python3 ./test.py -f query/queryNormal.py
-# python3 ./test.py -f query/queryError.py
+python3 ./test.py -f query/filterAllIntTypes.py
+python3 ./test.py -f query/filterFloatAndDouble.py
+python3 ./test.py -f query/querySort.py
+
+
+#stream
+python3 ./test.py -f stream/stream1.py
+python3 ./test.py -f stream/stream2.py
+
+
+
diff --git a/tests/pytest/table/boundary.py b/tests/pytest/table/boundary.py
index bb5e187f0d..50586b72ff 100644
--- a/tests/pytest/table/boundary.py
+++ b/tests/pytest/table/boundary.py
@@ -96,14 +96,14 @@ class TDTestCase:
tdLog.notice("table name max length is %d" % maxTableNameLen)
# create a super table with name exceed max length
- sname = self.generateString(maxTableNameLen + 1)
+ sname = self.generateString(maxTableNameLen)
tdLog.info("create a super table with length %d" % len(sname))
tdSql.error(
"create table %s (ts timestamp, value int) tags(id int)" %
sname)
# create a super table with name of max length
- sname = self.generateString(maxTableNameLen)
+ sname = self.generateString(maxTableNameLen - 1)
tdLog.info("create a super table with length %d" % len(sname))
tdSql.execute(
"create table %s (ts timestamp, value int) tags(id int)" %
@@ -113,12 +113,12 @@ class TDTestCase:
tdSql.checkRows(1)
# create a child table with name exceed max length
- name = self.generateString(maxTableNameLen + 1)
+ name = self.generateString(maxTableNameLen)
tdLog.info("create a child table with length %d" % len(name))
tdSql.error("create table %s using %s tags(0)" % (name, sname))
# create a child table with name of max length
- name = self.generateString(maxTableNameLen)
+ name = self.generateString(maxTableNameLen - 1)
tdLog.info("create a child table with length %d" % len(name))
tdSql.execute("create table %s using %s tags(0)" % (name, sname))
tdSql.query('show tables')
@@ -141,7 +141,7 @@ class TDTestCase:
tdSql.prepare()
# 8 bytes for timestamp
- maxRowSize = 65536 - 8
+ maxRowSize = 65535 - 8
maxCols = self.getLimitFromSourceCode('TSDB_MAX_COLUMNS') - 1
# for binary cols, 2 bytes are used for length
diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py
index ce843c3fe7..6ce986c51e 100644
--- a/tests/pytest/table/tablename-boundary.py
+++ b/tests/pytest/table/tablename-boundary.py
@@ -18,9 +18,7 @@ class TDTestCase:
tdSql.prepare()
getTableNameLen = "grep -w '#define TSDB_TABLE_NAME_LEN' ../../src/inc/taosdef.h|awk '{print $3}'"
- tableNameMaxLen = int(
- subprocess.check_output(
- getTableNameLen, shell=True))
+ tableNameMaxLen = int(subprocess.check_output(getTableNameLen, shell=True)) - 1
tdLog.info("table name max length is %d" % tableNameMaxLen)
chars = string.ascii_uppercase + string.ascii_lowercase
tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
diff --git a/tests/pytest/tag_lite/change.py b/tests/pytest/tag_lite/change.py
index 910d11ca7c..1f411b7e5a 100644
--- a/tests/pytest/tag_lite/change.py
+++ b/tests/pytest/tag_lite/change.py
@@ -50,18 +50,18 @@ class TDTestCase:
# TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1
# bool, tgcol2 int)
tdLog.info(
- "create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int)")
+ "create table ta_ch_mt2 (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int)")
tdSql.execute(
- 'create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int)')
+ 'create table ta_ch_mt2 (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int)')
# TSIM: sql create table $tb using $mt tags( 1, 2 )
- tdLog.info("create table tb2 using $mt tags( 1, 2 )")
- tdSql.execute('create table tb2 using $mt tags( 1, 2 )')
+ tdLog.info("create table tb2 using ta_ch_mt2 tags( 1, 2 )")
+ tdSql.execute('create table tb2 using ta_ch_mt2 tags( 1, 2 )')
# TSIM: sql insert into $tb values(now, 1)
tdLog.info("insert into tb2 values(now, 1)")
tdSql.execute("insert into tb2 values(now, 1)")
# TSIM: sql select * from $mt where tgcol1 = 1
- tdLog.info('select * from $mt where tgcol1 = 1')
- tdSql.query('select * from $mt where tgcol1 = 1')
+ tdLog.info('select * from ta_ch_mt2 where tgcol1 = 1')
+ tdSql.query('select * from ta_ch_mt2 where tgcol1 = 1')
# TSIM: if $rows != 1 then
tdLog.info('tdSql.checkRow(1)')
tdSql.checkRows(1)
@@ -107,18 +107,18 @@ class TDTestCase:
# TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1
# smallint, tgcol2 tinyint)
tdLog.info(
- "create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint)")
+ "create table ta_ch_mt3 (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint)")
tdSql.execute(
- 'create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint)')
+ 'create table ta_ch_mt3 (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint)')
# TSIM: sql create table $tb using $mt tags( 1, 2 )
- tdLog.info("create table tb3 using $mt tags( 1, 2 )")
- tdSql.execute('create table tb3 using $mt tags( 1, 2 )')
+ tdLog.info("create table tb3 using ta_ch_mt3 tags( 1, 2 )")
+ tdSql.execute('create table tb3 using ta_ch_mt3 tags( 1, 2 )')
# TSIM: sql insert into $tb values(now, 1)
tdLog.info("insert into tb3 values(now, 1)")
tdSql.execute("insert into tb3 values(now, 1)")
# TSIM: sql select * from $mt where tgcol1 = 1
- tdLog.info('select * from $mt where tgcol1 = 1')
- tdSql.query('select * from $mt where tgcol1 = 1')
+ tdLog.info('select * from ta_ch_mt3 where tgcol1 = 1')
+ tdSql.query('select * from ta_ch_mt3 where tgcol1 = 1')
# TSIM: if $rows != 1 then
tdLog.info('tdSql.checkRow(1)')
tdSql.checkRows(1)
@@ -151,18 +151,18 @@ class TDTestCase:
# TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1
# bigint, tgcol2 float)
tdLog.info(
- "create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float)")
+ "create table ta_ch_mt4 (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float)")
tdSql.execute(
- 'create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float)')
+ 'create table ta_ch_mt4 (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float)')
# TSIM: sql create table $tb using $mt tags( 1, 2 )
- tdLog.info("create table tb4 using $mt tags( 1, 2 )")
- tdSql.execute('create table tb4 using $mt tags( 1, 2 )')
+ tdLog.info("create table tb4 using ta_ch_mt4 tags( 1, 2 )")
+ tdSql.execute('create table tb4 using ta_ch_mt4 tags( 1, 2 )')
# TSIM: sql insert into $tb values(now, 1)
tdLog.info("insert into tb4 values(now, 1)")
tdSql.execute("insert into tb4 values(now, 1)")
# TSIM: sql select * from $mt where tgcol1 = 1
- tdLog.info('select * from $mt where tgcol1 = 1')
- tdSql.query('select * from $mt where tgcol1 = 1')
+ tdLog.info('select * from ta_ch_mt4 where tgcol1 = 1')
+ tdSql.query('select * from ta_ch_mt4 where tgcol1 = 1')
# TSIM: if $rows != 1 then
tdLog.info('tdSql.checkRow(1)')
tdSql.checkRows(1)
@@ -195,18 +195,18 @@ class TDTestCase:
# TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1
# double, tgcol2 binary(10))
tdLog.info(
- "create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10))")
+ "create table ta_ch_mt5 (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10))")
tdSql.execute(
- 'create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10))')
+ 'create table ta_ch_mt5 (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10))')
# TSIM: sql create table $tb using $mt tags( 1, '2' )
- tdLog.info("create table tb5 using $mt tags( 1, '2' )")
- tdSql.execute('create table tb5 using $mt tags( 1, '2' )')
+ tdLog.info("create table tb5 using ta_ch_mt5 tags( 1, '2' )")
+ tdSql.execute("create table tb5 using ta_ch_mt5 tags( 1, '2' )")
# TSIM: sql insert into $tb values(now, 1)
tdLog.info("insert into tb5 values(now, 1)")
tdSql.execute("insert into tb5 values(now, 1)")
# TSIM: sql select * from $mt where tgcol2 = '2'
- tdLog.info('select * from $mt where tgcol2 = '2'')
- tdSql.query('select * from $mt where tgcol2 = '2'')
+ tdLog.info("select * from ta_ch_mt5 where tgcol2 = '2'")
+ tdSql.query("select * from ta_ch_mt5 where tgcol2 = '2'")
# TSIM: if $rows != 1 then
tdLog.info('tdSql.checkRow(1)')
tdSql.checkRows(1)
@@ -239,18 +239,18 @@ class TDTestCase:
# TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1
# binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5
# double, tgcol6 binary(20))
- tdLog.info("create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20))")
+ tdLog.info("create table ta_ch_mt6 (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20))")
tdSql.execute(
- 'create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20))')
+ 'create table ta_ch_mt6 (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20))')
# TSIM: sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' )
- tdLog.info("create table tb6 using $mt tags( '1', 2, 3, '4', 5, '6' )")
- tdSql.execute('create table tb6 using $mt tags( '1', 2, 3, '4', 5, '6' )')
+ tdLog.info("create table tb6 using ta_ch_mt6 tags( '1', 2, 3, '4', 5, '6' )")
+ tdSql.execute("create table tb6 using ta_ch_mt6 tags( '1', 2, 3, '4', 5, '6' )")
# TSIM: sql insert into $tb values(now, 1)
tdLog.info("insert into tb6 values(now, 1)")
tdSql.execute("insert into tb6 values(now, 1)")
# TSIM: sql select * from $mt where tgcol1 = '1'
- tdLog.info('select * from $mt where tgcol1 = '1'')
- tdSql.query('select * from $mt where tgcol1 = '1'')
+ tdLog.info("select * from ta_ch_mt6 where tgcol1 = '1'")
+ tdSql.query("select * from ta_ch_mt6 where tgcol1 = '1'")
# TSIM: if $rows != 1 then
tdLog.info('tdSql.checkRow(1)')
tdSql.checkRows(1)
@@ -312,19 +312,19 @@ class TDTestCase:
# TSIM: $tb = $tbPrefix . $i
# TSIM:
# TSIM: sql select * from $mt where tgcol1 = 1 -x step24
- tdLog.info('select * from $mt where tgcol1 = 1 -x step24')
- tdSql.error('select * from $mt where tgcol1 = 14')
+ tdLog.info('select * from ta_ch_mt2 where tgcol1 = 1 -x step24')
+ tdSql.error('select * from ta_ch_mt2 where tgcol1 = 14')
# TSIM: return -1
# TSIM: step24:
# TSIM: sql select * from $mt where tgcol2 = 1 -x step25
- tdLog.info('select * from $mt where tgcol2 = 1 -x step25')
- tdSql.error('select * from $mt where tgcol2 = 15')
+ tdLog.info('select * from ta_ch_mt2 where tgcol2 = 1 -x step25')
+ tdSql.error('select * from ta_ch_mt2 where tgcol2 = 15')
# TSIM: return -1
# TSIM: step25:
# TSIM:
# TSIM: sql select * from $mt where tgcol3 = 1
- tdLog.info('select * from $mt where tgcol3 = 1')
- tdSql.query('select * from $mt where tgcol3 = 1')
+ tdLog.info('select * from ta_ch_mt2 where tgcol3 = 1')
+ tdSql.query('select * from ta_ch_mt2 where tgcol3 = 1')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -349,8 +349,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol4 = 2
- tdLog.info('select * from $mt where tgcol4 = 2')
- tdSql.query('select * from $mt where tgcol4 = 2')
+ tdLog.info('select * from ta_ch_mt2 where tgcol4 = 2')
+ tdSql.query('select * from ta_ch_mt2 where tgcol4 = 2')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -381,19 +381,19 @@ class TDTestCase:
# TSIM: $tb = $tbPrefix . $i
# TSIM:
# TSIM: sql select * from $mt where tgcol1 = 1 -x step31
- tdLog.info('select * from $mt where tgcol1 = 1 -x step31')
- tdSql.error('select * from $mt where tgcol1 = 11')
+ tdLog.info('select * from ta_ch_mt3 where tgcol1 = 1 -x step31')
+ tdSql.error('select * from ta_ch_mt3 where tgcol1 = 11')
# TSIM: return -1
# TSIM: step31:
# TSIM: sql select * from $mt where tgcol2 = 1 -x step32
- tdLog.info('select * from $mt where tgcol2 = 1 -x step32')
- tdSql.error('select * from $mt where tgcol2 = 12')
+ tdLog.info('select * from ta_ch_mt3 where tgcol2 = 1 -x step32')
+ tdSql.error('select * from ta_ch_mt3 where tgcol2 = 12')
# TSIM: return -1
# TSIM: step32:
# TSIM:
# TSIM: sql select * from $mt where tgcol3 = 1
- tdLog.info('select * from $mt where tgcol3 = 1')
- tdSql.query('select * from $mt where tgcol3 = 1')
+ tdLog.info('select * from ta_ch_mt3 where tgcol3 = 1')
+ tdSql.query('select * from ta_ch_mt3 where tgcol3 = 1')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -450,19 +450,19 @@ class TDTestCase:
# TSIM: $tb = $tbPrefix . $i
# TSIM:
# TSIM: sql select * from $mt where tgcol1 = 1 -x step41
- tdLog.info('select * from $mt where tgcol1 = 1 -x step41')
- tdSql.error('select * from $mt where tgcol1 = 11')
+ tdLog.info('select * from ta_ch_mt4 where tgcol1 = 1 -x step41')
+ tdSql.error('select * from ta_ch_mt4 where tgcol1 = 11')
# TSIM: return -1
# TSIM: step41:
# TSIM: sql select * from $mt where tgcol2 = 1 -x step42
- tdLog.info('select * from $mt where tgcol2 = 1 -x step42')
- tdSql.error('select * from $mt where tgcol2 = 12')
+ tdLog.info('select * from ta_ch_mt4 where tgcol2 = 1 -x step42')
+ tdSql.error('select * from ta_ch_mt4 where tgcol2 = 12')
# TSIM: return -1
# TSIM: step42:
# TSIM:
# TSIM: sql select * from $mt where tgcol3 = 1
- tdLog.info('select * from $mt where tgcol3 = 1')
- tdSql.query('select * from $mt where tgcol3 = 1')
+ tdLog.info('select * from ta_ch_mt4 where tgcol3 = 1')
+ tdSql.query('select * from ta_ch_mt4 where tgcol3 = 1')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -487,8 +487,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol4 = 2
- tdLog.info('select * from $mt where tgcol4 = 2')
- tdSql.query('select * from $mt where tgcol4 = 2')
+ tdLog.info('select * from ta_ch_mt4 where tgcol4 = 2')
+ tdSql.query('select * from ta_ch_mt4 where tgcol4 = 2')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -519,19 +519,19 @@ class TDTestCase:
# TSIM: $tb = $tbPrefix . $i
# TSIM:
# TSIM: sql select * from $mt where tgcol1 = 1 -x step51
- tdLog.info('select * from $mt where tgcol1 = 1 -x step51')
- tdSql.error('select * from $mt where tgcol1 = 11')
+ tdLog.info('select * from ta_ch_mt5 where tgcol1 = 1 -x step51')
+ tdSql.error('select * from ta_ch_mt5 where tgcol1 = 11')
# TSIM: return -1
# TSIM: step51:
# TSIM: sql select * from $mt where tgcol2 = 1 -x step52
- tdLog.info('select * from $mt where tgcol2 = 1 -x step52')
- tdSql.error('select * from $mt where tgcol2 = 12')
+ tdLog.info('select * from ta_ch_mt5 where tgcol2 = 1 -x step52')
+ tdSql.error('select * from ta_ch_mt5 where tgcol2 = 12')
# TSIM: return -1
# TSIM: step52:
# TSIM:
# TSIM: sql select * from $mt where tgcol3 = 1
- tdLog.info('select * from $mt where tgcol3 = 1')
- tdSql.query('select * from $mt where tgcol3 = 1')
+ tdLog.info('select * from ta_ch_mt5 where tgcol3 = 1')
+ tdSql.query('select * from ta_ch_mt5 where tgcol3 = 1')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -556,8 +556,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol4 = '2'
- tdLog.info('select * from $mt where tgcol4 = '2'')
- tdSql.query('select * from $mt where tgcol4 = '2'')
+ tdLog.info("select * from ta_ch_mt5 where tgcol4 = '2'")
+ tdSql.query("select * from ta_ch_mt5 where tgcol4 = '2'")
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -588,39 +588,39 @@ class TDTestCase:
# TSIM: $tb = $tbPrefix . $i
# TSIM:
# TSIM: sql select * from $mt where tgcol1 = 1 -x step61
- tdLog.info('select * from $mt where tgcol1 = 1 -x step61')
- tdSql.error('select * from $mt where tgcol1 = 11')
+ tdLog.info('select * from ta_ch_mt6 where tgcol1 = 1 -x step61')
+ tdSql.error('select * from ta_ch_mt6 where tgcol1 = 11')
# TSIM: return -1
# TSIM: step61:
# TSIM: sql select * from $mt where tgcol2 = 1 -x step62
- tdLog.info('select * from $mt where tgcol2 = 1 -x step62')
- tdSql.error('select * from $mt where tgcol2 = 12')
+ tdLog.info('select * from ta_ch_mt6 where tgcol2 = 1 -x step62')
+ tdSql.error('select * from ta_ch_mt6 where tgcol2 = 12')
# TSIM: return -1
# TSIM: step62:
# TSIM: sql select * from $mt where tgcol3 = 1 -x step63
- tdLog.info('select * from $mt where tgcol3 = 1 -x step63')
- tdSql.error('select * from $mt where tgcol3 = 13')
+ tdLog.info('select * from ta_ch_mt6 where tgcol3 = 1 -x step63')
+ tdSql.error('select * from ta_ch_mt6 where tgcol3 = 13')
# TSIM: return -1
# TSIM: step63:
# TSIM: sql select * from $mt where tgcol4 = 1 -x step64
- tdLog.info('select * from $mt where tgcol4 = 1 -x step64')
- tdSql.error('select * from $mt where tgcol4 = 14')
+ tdLog.info('select * from ta_ch_mt6 where tgcol4 = 1 -x step64')
+ tdSql.error('select * from ta_ch_mt6 where tgcol4 = 14')
# TSIM: return -1
# TSIM: step64:
# TSIM: sql select * from $mt where tgcol5 = 1 -x step65
- tdLog.info('select * from $mt where tgcol5 = 1 -x step65')
- tdSql.error('select * from $mt where tgcol5 = 15')
+ tdLog.info('select * from ta_ch_mt6 where tgcol5 = 1 -x step65')
+ tdSql.error('select * from ta_ch_mt6 where tgcol5 = 15')
# TSIM: return -1
# TSIM: step65:
# TSIM: sql select * from $mt where tgcol6 = 1 -x step66
- tdLog.info('select * from $mt where tgcol6 = 1 -x step66')
- tdSql.error('select * from $mt where tgcol6 = 16')
+ tdLog.info('select * from ta_ch_mt6 where tgcol6 = 1 -x step66')
+ tdSql.error('select * from ta_ch_mt6 where tgcol6 = 16')
# TSIM: return -1
# TSIM: step66:
# TSIM:
# TSIM: sql select * from $mt where tgcol7 = '1'
- tdLog.info('select * from $mt where tgcol7 = '1'')
- tdSql.query('select * from $mt where tgcol7 = '1'')
+ tdLog.info("select * from ta_ch_mt6 where tgcol7 = '1'")
+ tdSql.query("select * from ta_ch_mt6 where tgcol7 = '1'")
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -665,8 +665,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol8 = 2
- tdLog.info('select * from $mt where tgcol8 = 2')
- tdSql.query('select * from $mt where tgcol8 = 2')
+ tdLog.info('select * from ta_ch_mt6 where tgcol8 = 2')
+ tdSql.query('select * from ta_ch_mt6 where tgcol8 = 2')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -711,8 +711,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol9 = '4'
- tdLog.info('select * from $mt where tgcol9 = '4'')
- tdSql.query('select * from $mt where tgcol9 = '4'')
+ tdLog.info("select * from ta_ch_mt6 where tgcol9 = '4'")
+ tdSql.query("select * from ta_ch_mt6 where tgcol9 = '4'")
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -757,8 +757,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol10 = 5
- tdLog.info('select * from $mt where tgcol10 = 5')
- tdSql.query('select * from $mt where tgcol10 = 5')
+ tdLog.info('select * from ta_ch_mt6 where tgcol10 = 5')
+ tdSql.query('select * from ta_ch_mt6 where tgcol10 = 5')
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
@@ -803,8 +803,8 @@ class TDTestCase:
# TSIM: endi
# TSIM:
# TSIM: sql select * from $mt where tgcol11 = '6'
- tdLog.info('select * from $mt where tgcol11 = '6'')
- tdSql.query('select * from $mt where tgcol11 = '6'')
+ tdLog.info("select * from ta_ch_mt6 where tgcol11 = '6'")
+ tdSql.query("select * from ta_ch_mt6 where tgcol11 = '6'")
# TSIM: print $data01 $data02 $data03
tdLog.info('$data01 $data02 $data03')
# TSIM: if $rows != 1 then
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index ec7ac117c0..367217cd49 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -17,6 +17,7 @@ import time
import datetime
import inspect
from util.log import *
+import numpy as np
class TDSql:
@@ -196,5 +197,39 @@ class TDSql:
tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
(self.sql, self.affectedRows, expectAffectedRows))
+ def checkColumnSorted(self, col, order):
+ frame = inspect.stack()[1]
+ callerModule = inspect.getmodule(frame[0])
+ callerFilename = callerModule.__file__
+
+ if col < 0:
+ tdLog.exit(
+ "%s failed: sql:%s, col:%d is smaller than zero" %
+ (callerFilename, self.sql, col))
+ if col > self.queryCols:
+ tdLog.exit(
+ "%s failed: sql:%s, col:%d is larger than queryCols:%d" %
+ (callerFilename, self.sql, col, self.queryCols))
+
+ matrix = np.array(self.queryResult)
+ column = matrix[:, col]
+
+ if order == "" or order.upper() == "ASC":
+ if all(sorted(column) == column):
+ tdLog.info("sql:%s, column:%d is sorted in ascending order as expected" %
+ (self.sql, col))
+ else:
+ tdLog.exit("%s failed: sql:%s, col:%d is not sorted in ascending order" %
+ (callerFilename, self.sql, col))
+ elif order.upper() == "DESC":
+ if all(sorted(column, reverse=True) == column):
+ tdLog.info("sql:%s, column:%d is sorted in descending order as expected" %
+ (self.sql, col))
+ else:
+ tdLog.exit("%s failed: sql:%s, col:%d is not sorted in descending order" %
+ (callerFilename, self.sql, col))
+ else:
+ tdLog.exit("%s failed: sql:%s, the order provided for col:%d is not valid; expect '', 'asc' or 'desc'" %
+ (callerFilename, self.sql, col))
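+
+ # usage sketch (mirrors the calls made in query/querySort.py within this change):
+ # tdSql.query("select * from st order by ts desc")
+ # tdSql.checkColumnSorted(0, "desc") # passes only if result column 0 is in descending order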
tdSql = TDSql()
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index aa32fe4008..a7d6707e7d 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -211,3 +211,15 @@ run general/vector/table_field.sim
run general/vector/table_mix.sim
run general/vector/table_query.sim
run general/vector/table_time.sim
+run general/stream/stream_1.sim
+run general/stream/stream_2.sim
+run general/stream/stream_3.sim
+run general/stream/stream_restart.sim
+run general/stream/table_1.sim
+run general/stream/metrics_1.sim
+run general/stream/table_n.sim
+run general/stream/metrics_n.sim
+run general/stream/table_del.sim
+run general/stream/metrics_del.sim
+run general/stream/table_replica1_vnoden.sim
+run general/stream/metrics_replica1_vnoden.sim
diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/general/alter/cached_schema_after_alter.sim
index 921cfee3d1..d06856d5ff 100644
--- a/tests/script/general/alter/cached_schema_after_alter.sim
+++ b/tests/script/general/alter/cached_schema_after_alter.sim
@@ -53,7 +53,7 @@ endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/alter/metrics.sim b/tests/script/general/alter/metrics.sim
index b346b3b2c0..a2c5b7b465 100644
--- a/tests/script/general/alter/metrics.sim
+++ b/tests/script/general/alter/metrics.sim
@@ -375,9 +375,9 @@ endi
print ======== step9
print ======== step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql use d2
sql describe tb
diff --git a/tests/script/general/alter/table.sim b/tests/script/general/alter/table.sim
index c6402b45f7..25d3b42066 100644
--- a/tests/script/general/alter/table.sim
+++ b/tests/script/general/alter/table.sim
@@ -326,9 +326,9 @@ endi
print ======== step9
print ======== step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql use d1
sql describe tb
diff --git a/tests/script/general/cache/restart_metrics.sim b/tests/script/general/cache/restart_metrics.sim
index c85a66869a..18c514acbf 100644
--- a/tests/script/general/cache/restart_metrics.sim
+++ b/tests/script/general/cache/restart_metrics.sim
@@ -50,6 +50,7 @@ endi
print =============== step2
system sh/exec.sh -n dnode1 -s stop
+sleep 5000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
@@ -57,7 +58,8 @@ system sh/exec.sh -n dnode1 -s start
print =============== step3
print ==> sleep 8 seconds to renew cache
-sleep 8000
+sql reset query cache
+sleep 1000
print =============== step4
sql create database $db
diff --git a/tests/script/general/cache/restart_table.sim b/tests/script/general/cache/restart_table.sim
index 4e8bc92c10..c4e6c6f2ac 100644
--- a/tests/script/general/cache/restart_table.sim
+++ b/tests/script/general/cache/restart_table.sim
@@ -34,6 +34,7 @@ endi
print =============== step2
system sh/exec.sh -n dnode1 -s stop
+sleep 5000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
diff --git a/tests/script/general/column/commit.sim b/tests/script/general/column/commit.sim
index 035d28d3e5..c574db1aa9 100644
--- a/tests/script/general/column/commit.sim
+++ b/tests/script/general/column/commit.sim
@@ -89,9 +89,9 @@ endi
print =============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 4000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
print =============== step5
diff --git a/tests/script/general/column/metrics.sim b/tests/script/general/column/metrics.sim
index 401a4ae54e..673b66c9e2 100644
--- a/tests/script/general/column/metrics.sim
+++ b/tests/script/general/column/metrics.sim
@@ -157,9 +157,9 @@ endi
print =============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 2000
+sleep 5000
print =============== step5
diff --git a/tests/script/general/column/table.sim b/tests/script/general/column/table.sim
index 8e9c44fc46..aec0dc3c75 100644
--- a/tests/script/general/column/table.sim
+++ b/tests/script/general/column/table.sim
@@ -129,9 +129,9 @@ endi
print =============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 2000
+sleep 5000
print ============== step5
diff --git a/tests/script/general/compress/commitlog.sim b/tests/script/general/compress/commitlog.sim
index 7c167db3f3..b5d653fe83 100644
--- a/tests/script/general/compress/commitlog.sim
+++ b/tests/script/general/compress/commitlog.sim
@@ -89,7 +89,7 @@ print =============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
print =============== step5
diff --git a/tests/script/general/compress/compress.sim b/tests/script/general/compress/compress.sim
index 93fdcbaafa..6975f87996 100644
--- a/tests/script/general/compress/compress.sim
+++ b/tests/script/general/compress/compress.sim
@@ -84,7 +84,7 @@ print =============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
print =============== step5
diff --git a/tests/script/general/compress/compress2.sim b/tests/script/general/compress/compress2.sim
index 0eb58bd94b..cf96f572ac 100644
--- a/tests/script/general/compress/compress2.sim
+++ b/tests/script/general/compress/compress2.sim
@@ -84,7 +84,7 @@ print =============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
print =============== step5
diff --git a/tests/script/general/db/alter_tables_d2.sim b/tests/script/general/db/alter_tables_d2.sim
index 1fd24f9cec..bf950a5e49 100644
--- a/tests/script/general/db/alter_tables_d2.sim
+++ b/tests/script/general/db/alter_tables_d2.sim
@@ -211,10 +211,10 @@ endi
print ============================ step7
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
-sleep 3000
+sleep 5000
sql reset query cache
sleep 1000
@@ -334,10 +334,10 @@ endi
print ============================ step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
-sleep 3000
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/general/db/alter_tables_v1.sim b/tests/script/general/db/alter_tables_v1.sim
index 17aa746ce3..ccddb8f959 100644
--- a/tests/script/general/db/alter_tables_v1.sim
+++ b/tests/script/general/db/alter_tables_v1.sim
@@ -143,9 +143,9 @@ endi
print ============================ step7
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql reset query cache
sleep 1000
@@ -245,9 +245,9 @@ endi
print ============================ step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/general/db/alter_tables_v4.sim b/tests/script/general/db/alter_tables_v4.sim
index db00219ed0..75687b417e 100644
--- a/tests/script/general/db/alter_tables_v4.sim
+++ b/tests/script/general/db/alter_tables_v4.sim
@@ -204,9 +204,9 @@ endi
print ============================ step7
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql reset query cache
sleep 1000
@@ -325,9 +325,9 @@ endi
print ============================ step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/general/db/alter_vgroups.sim b/tests/script/general/db/alter_vgroups.sim
index 1aae7b9383..93f3b1c0e5 100644
--- a/tests/script/general/db/alter_vgroups.sim
+++ b/tests/script/general/db/alter_vgroups.sim
@@ -70,9 +70,9 @@ endi
print ============================ step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql create table db.t100 using db.st tags(0)
sql create table db.t101 using db.st tags(1)
@@ -132,9 +132,9 @@ print ============================ step5
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 3
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql create table db.t200 using db.st tags(0)
sql create table db.t201 using db.st tags(1)
diff --git a/tests/script/general/db/backup/keep.sim b/tests/script/general/db/backup/keep.sim
index 156b32ba4c..29771fc978 100644
--- a/tests/script/general/db/backup/keep.sim
+++ b/tests/script/general/db/backup/keep.sim
@@ -50,9 +50,9 @@ endi
print ======== step2 stop dnode
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode2 -s start
-sleep 3000
+sleep 5000
sql select * from tb
print ===> rows $rows
@@ -112,9 +112,9 @@ endi
print ======== step5 stop dnode
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode2 -s start
-sleep 3000
+sleep 5000
sql select * from tb
print ===> rows $rows
diff --git a/tests/script/general/db/delete.sim b/tests/script/general/db/delete.sim
index f95676088b..6d0090a78d 100644
--- a/tests/script/general/db/delete.sim
+++ b/tests/script/general/db/delete.sim
@@ -43,7 +43,7 @@ endi
print ======= step3
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
$x = 0
diff --git a/tests/script/general/db/dropdnodes.sim b/tests/script/general/db/dropdnodes.sim
index 7ba5e7b66e..be160910c5 100644
--- a/tests/script/general/db/dropdnodes.sim
+++ b/tests/script/general/db/dropdnodes.sim
@@ -70,7 +70,7 @@ endi
print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 500
+sleep 5000
sql drop dnode $hostname2
sleep 2000
@@ -101,4 +101,5 @@ endi
sql_error select * from db.t1
sql_error select * from db.t9
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/import/replica1.sim b/tests/script/general/import/replica1.sim
index c212e8723e..d450b3fb49 100644
--- a/tests/script/general/import/replica1.sim
+++ b/tests/script/general/import/replica1.sim
@@ -93,7 +93,7 @@ endi
print ================== dnode restart
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
@@ -162,7 +162,7 @@ endi
print ================= step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 5000
diff --git a/tests/script/general/insert/insert_drop.sim b/tests/script/general/insert/insert_drop.sim
index c688a35557..cf29656067 100644
--- a/tests/script/general/insert/insert_drop.sim
+++ b/tests/script/general/insert/insert_drop.sim
@@ -43,7 +43,7 @@ print ====== tables created
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
@@ -69,7 +69,7 @@ endw
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 4000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/auto_create_tb.sim b/tests/script/general/parser/auto_create_tb.sim
index ad65ea723f..6065daa6d3 100644
--- a/tests/script/general/parser/auto_create_tb.sim
+++ b/tests/script/general/parser/auto_create_tb.sim
@@ -208,7 +208,7 @@ endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/commit.sim b/tests/script/general/parser/commit.sim
index f1c980f3db..ecf7113926 100644
--- a/tests/script/general/parser/commit.sim
+++ b/tests/script/general/parser/commit.sim
@@ -81,7 +81,7 @@ endw
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 3000
print ================== server restart completed
diff --git a/tests/script/general/parser/first_last.sim b/tests/script/general/parser/first_last.sim
index 48288daeca..fa2d7675d2 100644
--- a/tests/script/general/parser/first_last.sim
+++ b/tests/script/general/parser/first_last.sim
@@ -75,7 +75,7 @@ run general/parser/first_last_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim
index 513b3cbbbe..5d785a2fc3 100644
--- a/tests/script/general/parser/groupby.sim
+++ b/tests/script/general/parser/groupby.sim
@@ -355,7 +355,7 @@ if $data00 != 0 then
return -1
endi
-if $data01 != 800 then
+if $data11 != 800 then
return -1
endi
diff --git a/tests/script/general/parser/import_commit3.sim b/tests/script/general/parser/import_commit3.sim
index 916bf6d05e..99ece98278 100644
--- a/tests/script/general/parser/import_commit3.sim
+++ b/tests/script/general/parser/import_commit3.sim
@@ -25,7 +25,7 @@ step1:
sql create database $db cache 16
print ====== create tables
sql use $db
-
+sql reset query cache
$i = 0
$ts = $ts0
$tb = $tbPrefix . $i
diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim
index 4033d1c735..1cd857162d 100644
--- a/tests/script/general/parser/interp.sim
+++ b/tests/script/general/parser/interp.sim
@@ -60,7 +60,7 @@ run general/parser/interp_test.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim
index 29f535942d..48f6e65a4f 100644
--- a/tests/script/general/parser/lastrow.sim
+++ b/tests/script/general/parser/lastrow.sim
@@ -61,7 +61,7 @@ run general/parser/lastrow_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim
index e3e952bdd3..2129c6b82a 100644
--- a/tests/script/general/parser/limit.sim
+++ b/tests/script/general/parser/limit.sim
@@ -61,7 +61,7 @@ run general/parser/limit_stb.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/limit1.sim b/tests/script/general/parser/limit1.sim
index 48fb6aaede..84b760ced9 100644
--- a/tests/script/general/parser/limit1.sim
+++ b/tests/script/general/parser/limit1.sim
@@ -61,7 +61,7 @@ run general/parser/limit1_stb.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/limit1_stb.sim b/tests/script/general/parser/limit1_stb.sim
index a1f9aabd9d..fbcd8d0965 100644
--- a/tests/script/general/parser/limit1_stb.sim
+++ b/tests/script/general/parser/limit1_stb.sim
@@ -352,7 +352,7 @@ if $data07 != nchar0 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and c1 > 1 and c2 < 9 and c3 > 2 and c4 < 8 and c5 > 3 and c6 < 7 and c7 > 0 and c8 like '%5' and t1 > 3 and t1 < 6 limit 1 offset 0;
+sql select max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and c1 > 1 and c2 < 9 and c3 > 2 and c4 < 8 and c5 > 3 and c6 < 7 and c7 != 0 and c8 like '%5' and t1 > 3 and t1 < 6 limit 1 offset 0;
if $rows != 1 then
return -1
endi
diff --git a/tests/script/general/parser/limit1_tblocks100.sim b/tests/script/general/parser/limit1_tblocks100.sim
index 126c3227a7..99ffd31f6d 100644
--- a/tests/script/general/parser/limit1_tblocks100.sim
+++ b/tests/script/general/parser/limit1_tblocks100.sim
@@ -61,7 +61,7 @@ run general/parser/limit1_stb.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/limit2.sim b/tests/script/general/parser/limit2.sim
index 5f71232585..d16ee29cf7 100644
--- a/tests/script/general/parser/limit2.sim
+++ b/tests/script/general/parser/limit2.sim
@@ -71,7 +71,7 @@ print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
return
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/nchar.sim b/tests/script/general/parser/nchar.sim
index 2c86748f21..c0de6f5058 100644
--- a/tests/script/general/parser/nchar.sim
+++ b/tests/script/general/parser/nchar.sim
@@ -198,15 +198,15 @@ if $rows != 100 then
return -1
endi
-sql select * from $mt where tgcol > '0'
-#print rows = $rows
-if $rows != 100 then
- return -1
-endi
-#print $data03
-if $data03 != 1 then
- return -1
-endi
+#sql select * from $mt where tgcol > '0'
+##print rows = $rows
+#if $rows != 100 then
+# return -1
+#endi
+##print $data03
+#if $data03 != 1 then
+# return -1
+#endi
# cumulative query with nchar tag filtering
sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tgcol = '1'
diff --git a/tests/script/general/parser/select_from_cache_disk.sim b/tests/script/general/parser/select_from_cache_disk.sim
index 9ffdf817d7..629d456761 100644
--- a/tests/script/general/parser/select_from_cache_disk.sim
+++ b/tests/script/general/parser/select_from_cache_disk.sim
@@ -36,7 +36,7 @@ sql insert into $tb values ('2018-09-17 09:00:00.030', 3)
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/single_row_in_tb.sim b/tests/script/general/parser/single_row_in_tb.sim
index 200047140e..2313a98b41 100644
--- a/tests/script/general/parser/single_row_in_tb.sim
+++ b/tests/script/general/parser/single_row_in_tb.sim
@@ -31,7 +31,7 @@ run general/parser/single_row_in_tb_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/slimit.sim b/tests/script/general/parser/slimit.sim
index be63f91803..3d28ac3d54 100644
--- a/tests/script/general/parser/slimit.sim
+++ b/tests/script/general/parser/slimit.sim
@@ -98,7 +98,7 @@ run general/parser/slimit_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/slimit1.sim b/tests/script/general/parser/slimit1.sim
index b987a6852b..05e34bc7f4 100644
--- a/tests/script/general/parser/slimit1.sim
+++ b/tests/script/general/parser/slimit1.sim
@@ -57,7 +57,7 @@ run general/parser/slimit1_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/slimit_alter_tags.sim b/tests/script/general/parser/slimit_alter_tags.sim
index b8397f950c..54bc868f6a 100644
--- a/tests/script/general/parser/slimit_alter_tags.sim
+++ b/tests/script/general/parser/slimit_alter_tags.sim
@@ -170,7 +170,7 @@ endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
diff --git a/tests/script/general/parser/tbnameIn.sim b/tests/script/general/parser/tbnameIn.sim
index 16302888d0..fd5f32972a 100644
--- a/tests/script/general/parser/tbnameIn.sim
+++ b/tests/script/general/parser/tbnameIn.sim
@@ -68,7 +68,7 @@ run general/parser/tbnameIn_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 4a3da2f2a0..67ef952c41 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -40,42 +40,42 @@
#run general/parser/nchar.sim
#sleep 2000
##run general/parser/null_char.sim
-#sleep 2000
-#run general/parser/single_row_in_tb.sim
-#sleep 2000
-#run general/parser/select_from_cache_disk.sim
-#sleep 2000
-#run general/parser/selectResNum.sim
-#sleep 2000
-#run general/parser/mixed_blocks.sim
-#sleep 2000
-#run general/parser/limit1.sim
-#sleep 2000
-#run general/parser/limit.sim
-#sleep 2000
-#run general/parser/limit1_tblocks100.sim
-#sleep 2000
-#run general/parser/select_across_vnodes.sim
-#sleep 2000
-#run general/parser/slimit1.sim
-#sleep 2000
-#run general/parser/tbnameIn.sim
-#sleep 2000
+sleep 2000
+run general/parser/single_row_in_tb.sim
+sleep 2000
+run general/parser/select_from_cache_disk.sim
+sleep 2000
+run general/parser/selectResNum.sim
+sleep 2000
+run general/parser/mixed_blocks.sim
+sleep 2000
+run general/parser/limit1.sim
+sleep 2000
+run general/parser/limit.sim
+sleep 2000
+run general/parser/limit1_tblocks100.sim
+sleep 2000
+run general/parser/select_across_vnodes.sim
+sleep 2000
+run general/parser/slimit1.sim
+sleep 2000
+run general/parser/tbnameIn.sim
+sleep 2000
run general/parser/projection_limit_offset.sim
sleep 2000
run general/parser/limit2.sim
sleep 2000
-#run general/parser/fill.sim
-#sleep 2000
-#run general/parser/fill_stb.sim
-#sleep 2000
-#run general/parser/where.sim
-#sleep 2000
-#run general/parser/slimit.sim
-#sleep 2000
-#run general/parser/select_with_tags.sim
-#sleep 2000
-#run general/parser/interp.sim
+run general/parser/fill.sim
+sleep 2000
+run general/parser/fill_stb.sim
+sleep 2000
+run general/parser/where.sim
+sleep 2000
+run general/parser/slimit.sim
+sleep 2000
+run general/parser/select_with_tags.sim
+sleep 2000
+run general/parser/interp.sim
sleep 2000
run general/parser/tags_dynamically_specifiy.sim
sleep 2000
diff --git a/tests/script/general/stable/disk.sim b/tests/script/general/stable/disk.sim
index 0865719a14..c7ddfb0591 100644
--- a/tests/script/general/stable/disk.sim
+++ b/tests/script/general/stable/disk.sim
@@ -58,7 +58,7 @@ endi
sleep 1000
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
sleep 6000
diff --git a/tests/script/general/stable/dnode3.sim b/tests/script/general/stable/dnode3.sim
index 38d05a924b..7e85641598 100644
--- a/tests/script/general/stable/dnode3.sim
+++ b/tests/script/general/stable/dnode3.sim
@@ -213,11 +213,4 @@ if $rows != 0 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/tag/commit.sim b/tests/script/general/tag/commit.sim
index 94dc13f0cf..fca94ee7d4 100644
--- a/tests/script/general/tag/commit.sim
+++ b/tests/script/general/tag/commit.sim
@@ -824,7 +824,7 @@ endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
print =============== step1
$i = 0
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 191e3212b6..084109623f 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -159,6 +159,21 @@ cd ../../../debug; make
./test.sh -f general/stable/values.sim
./test.sh -f general/stable/vnode3.sim
+#./test.sh -f general/stream/metrics_1.sim
+#./test.sh -f general/stream/metrics_del.sim
+#./test.sh -f general/stream/metrics_n.sim
+#./test.sh -f general/stream/metrics_replica1_vnoden.sim
+#./test.sh -f general/stream/new_stream.sim
+#./test.sh -f general/stream/restart_stream.sim
+#./test.sh -f general/stream/stream_1.sim
+#./test.sh -f general/stream/stream_2.sim
+#./test.sh -f general/stream/stream_3.sim
+#./test.sh -f general/stream/stream_restart.sim
+#./test.sh -f general/stream/table_1.sim
+#./test.sh -f general/stream/table_del.sim
+#./test.sh -f general/stream/table_n.sim
+#./test.sh -f general/stream/table_replica1_vnoden.sim
+
./test.sh -f general/table/autocreate.sim
./test.sh -f general/table/basic1.sim
./test.sh -f general/table/basic2.sim
diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim
index 67f54523df..6ef4001495 100644
--- a/tests/script/regressionSuite.sim
+++ b/tests/script/regressionSuite.sim
@@ -71,8 +71,8 @@ run general/http/restful_limit.sim
run general/http/restful_full.sim
run general/http/prepare.sim
run general/http/telegraf.sim
-# run general/http/grafana_bug.sim
-# run general/http/grafana.sim
+run general/http/grafana_bug.sim
+run general/http/grafana.sim
run general/import/basic.sim
run general/import/commit.sim
run general/import/large.sim
@@ -102,32 +102,32 @@ run general/parser/import_commit1.sim
run general/parser/import_commit2.sim
run general/parser/import_commit3.sim
run general/parser/insert_tb.sim
-# run general/parser/first_last.sim
+run general/parser/first_last.sim
#unsupport run general/parser/import_file.sim
-# run general/parser/lastrow.sim
+run general/parser/lastrow.sim
run general/parser/nchar.sim
#unsupport run general/parser/null_char.sim
-# run general/parser/single_row_in_tb.sim
+run general/parser/single_row_in_tb.sim
run general/parser/select_from_cache_disk.sim
-# run general/parser/limit.sim
+run general/parser/limit.sim
# run general/parser/limit1.sim
-# run general/parser/limit1_tblocks100.sim
-# run general/parser/mixed_blocks.sim
+run general/parser/limit1_tblocks100.sim
+run general/parser/mixed_blocks.sim
# run general/parser/selectResNum.sim
run general/parser/select_across_vnodes.sim
run general/parser/slimit1.sim
run general/parser/tbnameIn.sim
run general/parser/binary_escapeCharacter.sim
-# run general/parser/projection_limit_offset.sim
+run general/parser/projection_limit_offset.sim
run general/parser/limit2.sim
-# run general/parser/slimit.sim
+run general/parser/slimit.sim
run general/parser/fill.sim
-# run general/parser/fill_stb.sim
+run general/parser/fill_stb.sim
# run general/parser/interp.sim
# run general/parser/where.sim
#unsupport run general/parser/join.sim
#unsupport run general/parser/join_multivnode.sim
-# run general/parser/select_with_tags.sim
+run general/parser/select_with_tags.sim
#unsupport run general/parser/groupby.sim
#unsupport run general/parser/bug.sim
#unsupport run general/parser/tags_dynamically_specifiy.sim
@@ -142,7 +142,7 @@ run general/stable/dnode3.sim
run general/stable/metrics.sim
run general/stable/values.sim
run general/stable/vnode3.sim
-# run general/table/autocreate.sim
+run general/table/autocreate.sim
run general/table/basic1.sim
run general/table/basic2.sim
run general/table/basic3.sim
@@ -166,7 +166,7 @@ run general/table/int.sim
run general/table/limit.sim
run general/table/smallint.sim
run general/table/table_len.sim
-# run general/table/table.sim
+run general/table/table.sim
run general/table/tinyint.sim
run general/table/vgroup.sim
run general/tag/3.sim
@@ -211,3 +211,15 @@ run general/vector/table_field.sim
run general/vector/table_mix.sim
run general/vector/table_query.sim
run general/vector/table_time.sim
+run general/stream/stream_1.sim
+run general/stream/stream_2.sim
+run general/stream/stream_3.sim
+run general/stream/stream_restart.sim
+run general/stream/table_1.sim
+run general/stream/metrics_1.sim
+run general/stream/table_n.sim
+run general/stream/metrics_n.sim
+run general/stream/table_del.sim
+run general/stream/metrics_del.sim
+run general/stream/table_replica1_vnoden.sim
+run general/stream/metrics_replica1_vnoden.sim
diff --git a/tests/script/unique/arbitrator/dn3_mn1_full_createTableFail.sim b/tests/script/unique/arbitrator/dn3_mn1_full_createTableFail.sim
new file mode 100644
index 0000000000..7bcde02ba8
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_full_createTableFail.sim
@@ -0,0 +1,105 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create a database with replica 3 and create tables up to the max
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 16
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 3
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 10
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: create one more table, should return error since no more vnodes are available
+sql_error create table tbm using $stb tags( 10000 )
+
+
+
+
+
+
+
+
diff --git a/tests/script/unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim b/tests/script/unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
new file mode 100644
index 0000000000..97c2f02427
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
@@ -0,0 +1,105 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create a database with replica 3 and create tables up to the max
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 16
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 3
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 10
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: drop dnode4, should return error since no spare dnode is available
+sql_error drop dnode $hostname4
+
+
+
+
+
+
+
+
diff --git a/tests/script/unique/arbitrator/dn3_mn1_multiCreateDropTable.sim b/tests/script/unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
new file mode 100644
index 0000000000..67164908c7
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
@@ -0,0 +1,309 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create a database with replica 3, create tables, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 10
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: drop the middle table 5
+sql drop table tb5
+$totalRows = $totalRows - 10
+sleep 6000
+
+print ============== step4: insert data into other tables
+$tsStart = 1420041610000
+$i = 0
+$tblNum = 5
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+$i = 6
+$tblNum = 10
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step5: create the middle table 5 and insert data
+sql create table tb5 using $stb tags( 5 )
+sleep 3000
+
+$tsStart = 1420041620000
+$i = 5
+$tblNum = 6
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+sql select count(*) from tb5
+print data00 $data00
+if $data00 != 10 then
+ return -1
+endi
+
+print ============== step6: drop the first table 0
+sql drop table tb0
+$totalRows = $totalRows - 20
+sleep 6000
+
+print ============== step7: insert data into other tables
+$tsStart = 1420041630000
+$i = 1
+$tblNum = 10
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+sql select count(*) from tb5
+print data00 $data00
+if $data00 != 20 then
+ return -1
+endi
+
+print ============== step8: create the first table 0 and insert data
+sql create table tb0 using $stb tags( 0 )
+sleep 3000
+
+$tsStart = 1420041640000
+$i = 0
+$tblNum = 10
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+sql select count(*) from tb0
+print data00 $data00
+if $data00 != 10 then
+ return -1
+endi
+
+print ============== step9: drop the last table 9
+sql drop table tb9
+$totalRows = $totalRows - 40
+sleep 6000
+
+print ============== step10: insert data into other tables
+$tsStart = 1420041650000
+$i = 0
+$tblNum = 9
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step11: create the last table 9 and insert data
+sql create table tb9 using $stb tags( 9 )
+sleep 3000
+
+$tsStart = 1420041660000
+$i = 0
+$tblNum = 10
+while $i < $tblNum
+ $tb = tb . $i
+ #sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+sql select count(*) from tb9
+print data00 $data00
+if $data00 != 10 then
+ return -1
+endi
diff --git a/tests/script/unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim b/tests/script/unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
new file mode 100644
index 0000000000..71e606e529
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
@@ -0,0 +1,168 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+system sh/deploy.sh -n dnode5 -i 5
+
+system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode5 -c numOfMPeers -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/cfg.sh -n dnode2 -c walLevel -v 1
+system sh/cfg.sh -n dnode3 -c walLevel -v 1
+system sh/cfg.sh -n dnode4 -c walLevel -v 1
+system sh/cfg.sh -n dnode5 -c walLevel -v 1
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator
+
+system sh/cfg.sh -n dnode1 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode2 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode5 -c offlineThreshold -v 10
+
+system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode4 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode5 -c enableCoreFile -v 1
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create a database, create tables, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$rowNum = 10
+$tblNum = 16
+$totalRows = 0
+$tsStart = 1420041600000
+
+$db = db
+sql create database $db replica 2
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+print info: select count(*) from $stb
+sleep 1000
+sql reset query cache
+sleep 1000
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 4 then
+ return -1
+endi
+
+print ============== step3: stop dnode4; after the offline timeout, dnode4 will be auto-dropped
+### The script that brings the port down to cut the network does not work, so stop the service instead for now
+system sh/exec.sh -n dnode4 -s stop
+#system sh/port.sh -p 7400 -s down
+sleep 12000
+
+wait_dnode4_dropped:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+if $dnode3Status != ready then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step4: restart dnode4, but dnode4 is no longer in the cluster
+system sh/exec.sh -n dnode4 -s start
+sql show dnodes
+if $rows != 3 then
+ return -1
+endi
+
diff --git a/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim b/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
index 09654ca8a9..1652470346 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
@@ -51,7 +51,7 @@ system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 3000
-$totalTableNum = 10000
+$totalTableNum = 10
$sleepTimer = 10000
$db = db
@@ -192,7 +192,7 @@ if $data00 != $totalRows then
endi
-print ============== step4: stop dnode2 for checking if sync success
+print ============== step4: stop dnode2 to check if sync is ok
system sh/exec.sh -n dnode2 -s stop
sleep $sleepTimer
diff --git a/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim b/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
new file mode 100644
index 0000000000..b870d07c1d
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
@@ -0,0 +1,135 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create a database with replica 2, create tables, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 10000
+
+$db = db
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 100
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+sleep 1000
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: drop dnode4, then check rows
+#system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sql drop dnode $hostname4
+sleep $sleepTimer
+
+wait_dnode4_dropped:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+if $dnode3Status != ready then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+
+sql reset query cache
+sql select count(*) from $stb
+sleep 1000
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
diff --git a/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim b/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
new file mode 100644
index 0000000000..1b0a184690
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
@@ -0,0 +1,224 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+system sh/deploy.sh -n dnode5 -i 5
+
+system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode5 -c numOfMPeers -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/cfg.sh -n dnode2 -c walLevel -v 1
+system sh/cfg.sh -n dnode3 -c walLevel -v 1
+system sh/cfg.sh -n dnode4 -c walLevel -v 1
+system sh/cfg.sh -n dnode5 -c walLevel -v 1
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator
+
+system sh/cfg.sh -n dnode1 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode2 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode5 -c offlineThreshold -v 10
+
+system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode4 -c enableCoreFile -v 1
+system sh/cfg.sh -n dnode5 -c enableCoreFile -v 1
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create a database, create tables, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$rowNum = 10
+$tblNum = 16
+$totalRows = 0
+$tsStart = 1420041600000
+
+$db = db
+sql create database $db replica 2
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+print info: select count(*) from $stb
+sleep 1000
+sql reset query cache
+sleep 1000
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 4 then
+ return -1
+endi
+
+print ============== step3: stop dnode4; after the offline timeout, dnode4 will be auto-dropped
+system sh/exec.sh -n dnode4 -s stop
+sleep 12000
+
+wait_dnode4_dropped:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+if $dnode3Status != ready then
+ sleep 2000
+ goto wait_dnode4_dropped
+endi
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step4: restart dnode4, but dnode4 is no longer in the cluster
+system sh/exec.sh -n dnode4 -s start
+sleep 3000
+sql show dnodes
+if $rows != 3 then
+ return -1
+endi
+
+print ============== step5: re-add dnode4 to the cluster, it should fail
+sql create dnode $hostname4
+sleep 12000
+sql show dnodes
+if $rows != 3 then
+ return -1
+endi
+
+print ============== step6: remove the dnode4 directory, then re-add dnode4 to the cluster, it should succeed
+system sh/exec.sh -n dnode4 -s stop
+system rm -rf ../../../sim/dnode4
+
+system sh/deploy.sh -n dnode4 -i 4
+system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1
+system sh/cfg.sh -n dnode4 -c walLevel -v 1
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
+system sh/cfg.sh -n dnode4 -c enableCoreFile -v 1
+
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname4
+sleep 6000
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_6
+#$dnode5Status = $data4_5
+
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+if $dnode3Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim
index c156d7d55c..fda850d2c9 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim
@@ -55,7 +55,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
new file mode 100644
index 0000000000..d556976a43
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
@@ -0,0 +1,226 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3, add them into the cluster, then create a database with replica 2, create tables, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+#system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+#sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 100
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+sleep 1000
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode3, then corrupt vnode data file in dnode3
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode3_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode3_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode3Status != offline then
+ sleep 2000
+ goto wait_dnode3_offline_0
+endi
+
+wait_dnode3_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode3_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode3Vtatus = $data4_2
+$dnode2Vtatus = $data7_2
+
+if $dnode3Vtatus != offline then
+ sleep 2000
+ goto wait_dnode3_vgroup_offline
+endi
+if $dnode2Vtatus != master then
+ sleep 2000
+ goto wait_dnode3_vgroup_offline
+endi
+
+# delete the second row in place to corrupt the data file
+system sed -i '2d' ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/v1849.data
+sleep 1000
+
+print ============== step4: restart dnode3, and run query
+system sh/exec.sh -n dnode3 -s start
+sleep $sleepTimer
+wait_dnode3_reready:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode3_reready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode3Status != ready then
+ sleep 2000
+ goto wait_dnode3_reready
+endi
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: stop dnode2, and check if dnode3 sync ok
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode2_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode2_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline_0
+endi
+
+wait_dnode3_vgroup_master:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode3_vgroup_master
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode2Vtatus = $data7_2
+$dnode3Vtatus = $data4_2
+
+if $dnode2Vtatus != offline then
+ sleep 2000
+ goto wait_dnode3_vgroup_master
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode3_vgroup_master
+endi
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
new file mode 100644
index 0000000000..968a93156e
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
@@ -0,0 +1,154 @@
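+# scenario: corrupt a tsdb data file on dnode3 while the cluster stays online, verify queries still return every row, then stop dnode2 and check that the vgroup fails over to dnode3 as master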
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+#system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+#sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 100
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+   sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+sleep 1000
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: corrupt vnode data file in dnode3, not stop dnode3
+# delete the second line of the data file in place (sed -i) to simulate a corrupted tsdb data file
+system sed -i '2d' ../../../sim/dnode3/data/vnode/vnode2/tsdb/data/v1849.data
+sleep 1000
+
+print ============== step4: run query
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: stop dnode2, and check if dnode3 sync ok
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode2_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode2_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline_0
+endi
+
+wait_dnode3_vgroup_master:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode3_vgroup_master
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode2Vtatus = $data7_2
+$dnode3Vtatus = $data4_2
+
+if $dnode2Vtatus != offline then
+ sleep 2000
+ goto wait_dnode3_vgroup_master
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode3_vgroup_master
+endi
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim
new file mode 100644
index 0000000000..82f2aad07b
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim
@@ -0,0 +1,273 @@
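+# scenario: with a replica-3 vgroup on dnode2/dnode3/dnode4, stop dnode4 and wipe its vnode directory, restart it and wait until it rejoins as slave, then stop dnode2/dnode3 and verify dnode4 serves the full data set as master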
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster, then create database with replica 3, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 100
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+   sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+sleep 1000
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4, and remove its vnode subdirectories
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+
+system rm -rf ../../../sim/dnode4/data/vnode/*
+sleep 1000
+
+print ============== step4: restart dnode4, waiting sync end
+system sh/exec.sh -n dnode4 -s start
+sleep $sleepTimer
+wait_dnode4_reready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_reready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_reready
+endi
+
+wait_dnode4_vgroup_slave:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+print dnode4Vtatus: $dnode4Vtatus
+print dnode3Vtatus: $dnode3Vtatus
+if $dnode4Vtatus != slave then
+ sleep 2000
+ goto wait_dnode4_vgroup_slave
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_slave
+endi
+
+print ============== step5: stop dnode3/dnode2, and check rows
+system sh/exec.sh -n dnode2 -s stop
+system sh/exec.sh -n dnode3 -s stop
+sleep $sleepTimer
+
+wait_dnode23_offline:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode23_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode23_offline
+endi
+if $dnode3Status != offline then
+ sleep 2000
+ goto wait_dnode23_offline
+endi
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode23_offline
+endi
+
+wait_dnode4_vgroup_master:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_master
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+print dnode4Vtatus: $dnode4Vtatus
+print dnode3Vtatus: $dnode3Vtatus
+if $dnode4Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_master
+endi
+if $dnode3Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_master
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+sql insert into $tb values ( now , 20000 ) ( now + 1a, 20001 ) ( now + 2a, 20002 )
+$totalRows = $totalRows + 3
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
index b6a2b7e1d5..4e9afbf31a 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
@@ -55,7 +55,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -152,7 +152,7 @@ print ============== step4: restart dnode2, then create database with replica 2,
system sh/exec.sh -n dnode2 -s start
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db1
diff --git a/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim b/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim
new file mode 100644
index 0000000000..57a833b8de
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim
@@ -0,0 +1,126 @@
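+# scenario: two mnodes and a replica-3 database; kill dnode2 and verify the remaining mnode stays master and all inserted rows are still queryable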
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+#system sh/cfg.sh -n dnode1 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3 and add into cluster, then create database with replica 3, and create tables up to max tables
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+#system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sleep 3000
+sql create dnode $hostname3
+#sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 4
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 3
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 10
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00:$data00 totalRows:$totalRows
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: stop dnode2
+system sh/exec.sh -n dnode2 -s stop
+sleep 3000
+
+sql show mnodes
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+$mnode1Status = $data2_1
+$mnode2Status = $data2_2
+$mnode3Status = $data2_3
+#$mnode4Status = $data2_4
+
+if $mnode1Status != master then
+ return -1
+endi
+
+if $mnode2Status != offline then
+ return -1
+endi
+
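+# clear any client-side cached table meta so the next query is answered from the current cluster state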
+sql reset query cache
+sql select count(*) from $stb
+print data00:$data00 totalRows:$totalRows
+if $data00 != $totalRows then
+ return -1
+endi
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim
new file mode 100644
index 0000000000..26a6359a5c
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim
@@ -0,0 +1,205 @@
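+# scenario: while dnode4 is offline, alter the super table schema (drop column c1, add column f1) and insert extra rows, then restart dnode4 and wait until count(*) matches the new total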
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: alter table and insert more data rows
+sql alter table $stb drop column c1
+sql alter table $stb add column f1 double
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql insert into $tb values (now, 10001, 1.0001) (now + 1s, 10002, 1.0002) (now + 2s, 10003, 1.0003) (now + 3s, 10004, 1.0004)
+ $i = $i + 1
+endw
+$addRows = 4 * $tblNum
+$totalRows = $totalRows + $addRows
+
+print ============== step5: restart dnode4, waiting dnode4 synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim b/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim
new file mode 100644
index 0000000000..b883f077c2
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim
@@ -0,0 +1,232 @@
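+# scenario: while dnode4 is offline, alter columns and tags of the super table, drop and recreate all child tables with fresh rows, then restart dnode4 and wait until count(*) matches again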
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+#system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+#system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
+
+$db = db
+print create database $db replica 2 maxTables $maxTables
+sql create database $db replica 2 maxTables $maxTables
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t0 int, t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: alter table columns and tags, then drop all sub tables, recreate them with new tags and insert more data rows
+sql alter table $stb drop column c1
+sql alter table $stb add column f1 double
+
+sql alter table $stb add tag t2 int
+sql alter table $stb add tag t3 int
+sql alter table $stb drop tag t1
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql drop table $tb
+ $i = $i + 1
+endw
+
+$totalRows = 0
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, waiting dnode4 synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim
new file mode 100644
index 0000000000..e7b2c70c78
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim
@@ -0,0 +1,219 @@
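+# scenario: while dnode4 is offline, create a second batch of child tables and insert rows into them, then restart dnode4 and wait until count(*) reflects all rows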
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$maxTables = $totalTableNum * 2
+
+$db = db
+print create database $db replica 2 maxTables $maxTables
+sql create database $db replica 2 maxTables $maxTables
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+   sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: create more tables and insert data rows
+$tsStart = $tsEnd + 1000
+$i = $tblNum
+$tblNum = $tblNum * 2
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+   sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, waiting dnode4 synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_altered:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00
+ sleep 2000
+ goto wait_table_altered
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim b/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim
new file mode 100644
index 0000000000..20c575d382
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim
@@ -0,0 +1,169 @@
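+# scenario: while dnode4 is offline, drop the whole database, then restart dnode4 and wait until show databases returns no rows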
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+   sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: drop db
+sql drop database $db
+
+print ============== step5: restart dnode4
+system sh/exec.sh -n dnode4 -s start
+
+print ============== step6: check result
+sql reset query cache
+$cnt = 0
+wait_database_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql show databases
+if $rows != 0 then
+ sleep 2000
+ goto wait_database_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim
new file mode 100644
index 0000000000..9f72cde440
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim
@@ -0,0 +1,203 @@
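+# scenario: while dnode4 is offline, drop several child tables, then restart dnode4 and wait until count(tbname) reflects the reduced table count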
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+   sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: drop some tables
+$i = 1
+$dropTblNum = 6
+
+while $i < $dropTblNum
+ $tb = tb . $i
+ sql drop table if exists $tb
+ $i = $i + 1
+endw
+$tblNum = $tblNum - 5
+
+print ============== step5: restart dnode4 and wait for dnode4 to be synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(tbname) from $stb
+if $data00 != $tblNum then
+ print data00: $data00 tblNum: $tblNum
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim
new file mode 100644
index 0000000000..43f9dfff14
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim
@@ -0,0 +1,205 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create database with replica 3, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: alter table and insert more data rows
+sql alter table $stb drop column c1
+sql alter table $stb add column f1 double
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql insert into $tb values (now, 10001, 1.0001) (now + 1s, 10002, 1.0002) (now + 2s, 10003, 1.0003) (now + 3s, 10004, 1.0004)
+ $i = $i + 1
+endw
+$addRows = 4 * $tblNum
+$totalRows = $totalRows + $addRows
+
+print ============== step5: restart dnode4 and wait for dnode4 to be synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_altered:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_altered
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim b/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim
new file mode 100644
index 0000000000..e27bbafba7
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim
@@ -0,0 +1,232 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+#system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+#system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create database with replica 3, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
+
+$db = db
+print create database $db replica 3 maxTables $maxTables
+sql create database $db replica 3 maxTables $maxTables
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t0 int, t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: alter table columns and tags, then drop all subtables, recreate some subtables and insert more data rows
+sql alter table $stb drop column c1
+sql alter table $stb add column f1 double
+
+sql alter table $stb add tag t2 int
+sql alter table $stb add tag t3 int
+sql alter table $stb drop tag t1
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql drop table $tb
+ $i = $i + 1
+endw
+
+$totalRows = 0
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4 and wait for dnode4 to be synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim
new file mode 100644
index 0000000000..1e47157975
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim
@@ -0,0 +1,218 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create database with replica 3, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
+
+$db = db
+print create database $db replica 3 maxTables $maxTables
+sql create database $db replica 3 maxTables $maxTables
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: create more tables and insert data rows
+$tsStart = $tsEnd + 1000
+$i = $tblNum
+$tblNum = $tblNum * 2
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4 and wait for dnode4 to be synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_created:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00
+ sleep 2000
+ goto wait_table_created
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim b/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim
new file mode 100644
index 0000000000..c9fc91527d
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim
@@ -0,0 +1,169 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create database with replica 3, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: drop db
+sql drop database $db
+
+print ============== step5: restart dnode4
+system sh/exec.sh -n dnode4 -s start
+
+print ============== step6: check result
+sql reset query cache
+$cnt = 0
+wait_database_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql show databases
+if $rows != 0 then
+ sleep 2000
+ goto wait_database_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim
new file mode 100644
index 0000000000..aecf41b892
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim
@@ -0,0 +1,203 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4, add them into the cluster, then create database with replica 3, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: drop some tables
+$i = 1
+$dropTblNum = 6
+
+while $i < $dropTblNum
+ $tb = tb . $i
+ sql drop table if exists $tb
+ $i = $i + 1
+endw
+$tblNum = $tblNum - 5
+
+print ============== step5: restart dnode4 and wait for dnode4 to be synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(tbname) from $stb
+if $data00 != $tblNum then
+ print data00: $data00 tblNum: $tblNum
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim b/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim
new file mode 100644
index 0000000000..9730842938
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim
@@ -0,0 +1,193 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4, add them into the cluster, then create database with replica 2, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00:$data00 totalRows:$totalRows
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then alter table and insert data in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_alterTable_background_add.sim
+
+print ============== step6: check result
+#in background.sim, add one column and insert 36 rows
+$totalRows = $totalRows + 36
+
+$cnt = 0
+wait_table_altered:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00:$data00 totalRows:$totalRows
+ sleep 2000
+ goto wait_table_altered
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim b/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim
new file mode 100644
index 0000000000..21957871a5
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim
@@ -0,0 +1,193 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4, add them into the cluster, then create database with replica 2, create table and insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table, insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vstatus = $data4_2
+$dnode3Vstatus = $data7_2
+
+if $dnode4Vstatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vstatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then alter table and insert data in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_alterTable_background_drop.sim
+
+print ============== step6: check result
+#in background.sim, drop one column and add one new column, then insert 36 rows
+$totalRows = $totalRows + 36
+
+$cnt = 0
+wait_table_altered:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00
+ sleep 2000
+ goto wait_table_altered
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica2_dropDb.sim b/tests/script/unique/arbitrator/sync_replica2_dropDb.sim
new file mode 100644
index 0000000000..9836284af9
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica2_dropDb.sim
@@ -0,0 +1,189 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, create table, and insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then drop the database in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_dropDb_background.sim
+
+print ============== step6: check result
+$cnt = 0
+wait_database_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql show databases
+if $rows != 0 then
+ sleep 2000
+ goto wait_database_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica2_dropTable.sim b/tests/script/unique/arbitrator/sync_replica2_dropTable.sim
new file mode 100644
index 0000000000..4793e8e535
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica2_dropTable.sim
@@ -0,0 +1,199 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode4 and add into cluster, then create database with replica 2, create table, and insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode4Status = $data4_3
+#$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00:$data00 totalRows:$totalRows
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then drop some tables in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_dropTable_background.sim
+
+print ============== step6: check result
+#in background.sim, drop 5 tables
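+# note: step2 and step4 each add 540 rows per table, so dropping 5 tables removes 5 x 1080 = 5400 rows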
+$totalRows = $totalRows - 5400
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00:$data00 totalRows:$totalRows
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+$tblNum = $tblNum - 5
+sql select count(tbname) from $stb
+if $data00 != $tblNum then
+ print data00: $data00 tblNum: $tblNum
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim b/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim
new file mode 100644
index 0000000000..9277ad2c85
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim
@@ -0,0 +1,193 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster, then create database with replica 3, create table, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then alter the table and insert data in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_alterTable_background_add.sim
+
+print ============== step6: check result
+#in background.sim, add one column, then insert 36 rows (4 rows into each of tables tb1..tb9)
+$totalRows = $totalRows + 36
+
+$cnt = 0
+wait_table_altered:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_altered
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim b/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim
new file mode 100644
index 0000000000..6593d6933b
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim
@@ -0,0 +1,193 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster, then create database with replica 3, create table, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then alter the table and insert data in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_alterTable_background_drop.sim
+
+print ============== step6: check result
+#in background.sim, drop one column and add one new column, then insert 36 rows
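+# note: the background script inserts 4 rows into each of tables tb1..tb9, hence 9 x 4 = 36 extra rows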
+$totalRows = $totalRows + 36
+
+$cnt = 0
+wait_table_altered:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00
+ sleep 2000
+ goto wait_table_altered
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica3_dropDb.sim b/tests/script/unique/arbitrator/sync_replica3_dropDb.sim
new file mode 100644
index 0000000000..7099b1dc8e
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica3_dropDb.sim
@@ -0,0 +1,189 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster, then create database with replica 3, create table, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then drop the database in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_dropDb_background.sim
+
+print ============== step6: check result
+$cnt = 0
+wait_database_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql show databases
+if $rows != 0 then
+ sleep 2000
+ goto wait_database_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica3_dropTable.sim b/tests/script/unique/arbitrator/sync_replica3_dropTable.sim
new file mode 100644
index 0000000000..f902b41de5
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica3_dropTable.sim
@@ -0,0 +1,199 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster, then create database with replica 3, create table, and insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+print create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 3 maxTables $totalTableNum
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+sleep 2000
+print ============== step4: insert more data rows
+$tsStart = $tsEnd + 1000
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, then drop some tables in another thread while dnode4 is syncing
+system sh/exec.sh -n dnode4 -s start
+run_back unique/arbitrator/sync_replica_dropTable_background.sim
+
+print ============== step6: check result
+#in background.sim, drop 5 tables (tb1..tb5)
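+# note: step2 and step4 each add 540 rows per table, so dropping 5 tables removes 5 x 1080 = 5400 rows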
+$totalRows = $totalRows - 5400
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+$tblNum = $tblNum - 5
+sql select count(tbname) from $stb
+if $data00 != $tblNum then
+ print data00: $data00
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim b/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim
new file mode 100644
index 0000000000..3867aa3699
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim
@@ -0,0 +1,25 @@
+sql connect
+
+$db = db
+$stb = stb
+print =============== sync_replica_alterTable_background_add.sim step0: alter table and insert data
+$totalTableNum = 10
+
+sql use $db
+
+#sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+print alter table $stb add column f1 float
+sql alter table $stb add column f1 float
+
+$tblNum = $totalTableNum
+$alterTblNum = 10
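+# the loop below starts at i=1, so tables tb1..tb9 (9 tables) each receive 4 new rows: 36 rows in total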
+
+$i = 1
+while $i < $alterTblNum
+ $tb = tb . $i
+ sql insert into $tb values (now, 10001, 10001) (now + 1s, 10002, 10002) (now + 2s, 10003, 10003) (now + 3s, 10004, 10004)
+ $i = $i + 1
+endw
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim b/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim
new file mode 100644
index 0000000000..fb8bc60972
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim
@@ -0,0 +1,25 @@
+sql connect
+
+$db = db
+$stb = stb
+print =============== sync_replica_alterTable_background_drop.sim step0: alter table and insert data
+$totalTableNum = 100
+
+sql use $db
+
+#sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+sql alter table $stb add column f1 double
+sql alter table $stb drop column c1
+
+$tblNum = $totalTableNum
+$alterTblNum = 10
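+# the loop below starts at i=1, so tables tb1..tb9 (9 tables) each receive 4 rows after the column change: 36 rows in total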
+
+$i = 1
+while $i < $alterTblNum
+ $tb = tb . $i
+ sql insert into $tb values (now, 10001) (now + 1s, 10002) (now + 2s, 10003) (now + 3s, 10004)
+ $i = $i + 1
+endw
+
+
+
diff --git a/tests/script/unique/arbitrator/sync_replica_dropDb_background.sim b/tests/script/unique/arbitrator/sync_replica_dropDb_background.sim
new file mode 100644
index 0000000000..440b525bd7
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica_dropDb_background.sim
@@ -0,0 +1,6 @@
+sql connect
+
+$db = db
+$stb = stb
+print =============== sync_replica_dropDb_background.sim step0: drop db
+sql drop database if exists $db
diff --git a/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim b/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim
new file mode 100644
index 0000000000..485253027a
--- /dev/null
+++ b/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim
@@ -0,0 +1,18 @@
+sql connect
+
+$db = db
+$stb = stb
+print =============== sync_replica_dropTable_background.sim step0: drop table
+$totalTableNum = 6
+
+sql use $db
+
+$tblNum = $totalTableNum
+$dropTblNum = 6
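+# the loop below starts at i=1 and stops before 6, so 5 tables (tb1..tb5) are dropped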
+
+$i = 1
+while $i < $dropTblNum
+ $tb = tb . $i
+ sql drop table if exists $tb
+ $i = $i + 1
+endw
diff --git a/tests/script/unique/arbitrator/testSuite.sim b/tests/script/unique/arbitrator/testSuite.sim
new file mode 100644
index 0000000000..d286ecfaf2
--- /dev/null
+++ b/tests/script/unique/arbitrator/testSuite.sim
@@ -0,0 +1,36 @@
+run unique/arbitrator/dn2_mn1_cache_file_sync.sim
+run unique/arbitrator/dn2_mn1_cache_file_sync_second.sim
+run unique/arbitrator/dn3_mn1_full_createTableFail.sim
+run unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
+run unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
+run unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
+run unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
+run unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
+run unique/arbitrator/dn3_mn1_replica_change.sim
+run unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
+run unique/arbitrator/dn3_mn1_vnode_change.sim
+run unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
+run unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
+####run unique/arbitrator/dn3_mn1_vnode_delDir.sim
+run unique/arbitrator/dn3_mn1_vnode_nomaster.sim
+run unique/arbitrator/dn3_mn2_killDnode.sim
+run unique/arbitrator/insert_duplicationTs.sim
+run unique/arbitrator/offline_replica2_alterTable_online.sim
+run unique/arbitrator/offline_replica2_alterTag_online.sim
+run unique/arbitrator/offline_replica2_createTable_online.sim
+run unique/arbitrator/offline_replica2_dropDb_online.sim
+run unique/arbitrator/offline_replica2_dropTable_online.sim
+run unique/arbitrator/offline_replica3_alterTable_online.sim
+run unique/arbitrator/offline_replica3_alterTag_online.sim
+run unique/arbitrator/offline_replica3_createTable_online.sim
+run unique/arbitrator/offline_replica3_dropDb_online.sim
+run unique/arbitrator/offline_replica3_dropTable_online.sim
+run unique/arbitrator/replica_changeWithArbitrator.sim
+run unique/arbitrator/sync_replica2_alterTable_add.sim
+run unique/arbitrator/sync_replica2_alterTable_drop.sim
+run unique/arbitrator/sync_replica2_dropDb.sim
+run unique/arbitrator/sync_replica2_dropTable.sim
+run unique/arbitrator/sync_replica3_alterTable_add.sim
+run unique/arbitrator/sync_replica3_alterTable_drop.sim
+run unique/arbitrator/sync_replica3_dropDb.sim
+run unique/arbitrator/sync_replica3_dropTable.sim
diff --git a/tests/script/unique/big/balance.sim b/tests/script/unique/big/balance.sim
index 09fca14931..b2a15b0b96 100644
--- a/tests/script/unique/big/balance.sim
+++ b/tests/script/unique/big/balance.sim
@@ -171,6 +171,7 @@ if $data2_3 != 2 then
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
sql reset query cache
sleep 1000
@@ -227,6 +228,7 @@ if $data2_3 != null then
endi
system sh/exec.sh -n dnode3 -s stop -x SIGINT
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/unique/cluster/balance1.sim b/tests/script/unique/cluster/balance1.sim
index 80dfd81e34..c402c111cf 100644
--- a/tests/script/unique/cluster/balance1.sim
+++ b/tests/script/unique/cluster/balance1.sim
@@ -163,7 +163,7 @@ print dnode4 ==> $dnode4Role
print ============================== step5
print ========= add dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 9000
diff --git a/tests/script/unique/cluster/balance3.sim b/tests/script/unique/cluster/balance3.sim
index 77eba2a2a5..8920ec9fc1 100644
--- a/tests/script/unique/cluster/balance3.sim
+++ b/tests/script/unique/cluster/balance3.sim
@@ -381,6 +381,7 @@ if $dnode7Vnodes != 3 then
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 5000
print ============================== step11
print ========= add db4
@@ -429,6 +430,7 @@ if $dnode7Vnodes != 4 then
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 5000
print ============================== step13
sql reset query cache
diff --git a/tests/script/unique/column/replica3.sim b/tests/script/unique/column/replica3.sim
index c21f71dc2c..2d6c194ef8 100644
--- a/tests/script/unique/column/replica3.sim
+++ b/tests/script/unique/column/replica3.sim
@@ -29,8 +29,8 @@ while $x < 1010
$x = $x + 1
endw
-sql_error create database d1 replica 2 wal 0
-sql create database d2 replica 1 wal 0
+sql_error create database d1 replica 2 wallevel 0
+sql_error create database d2 replica 1 wallevel 0
sql_error alter database d2 replica 2
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/unique/db/commit.sim b/tests/script/unique/db/commit.sim
index 98256b01dc..caff3c6a78 100644
--- a/tests/script/unique/db/commit.sim
+++ b/tests/script/unique/db/commit.sim
@@ -50,7 +50,7 @@ print ======== step2 stop dnode
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
system sh/exec.sh -n dnode2 -s start
-sleep 3000
+sleep 5000
sql select * from tb order by ts desc
print ===> rows $rows
@@ -101,7 +101,7 @@ print ======== step5 stop dnode
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
system sh/exec.sh -n dnode2 -s start
-sleep 3000
+sleep 5000
sql select * from tb
print ===> rows $rows
diff --git a/tests/script/unique/db/delete.sim b/tests/script/unique/db/delete.sim
index d9b6e8a9f9..819da67c8a 100644
--- a/tests/script/unique/db/delete.sim
+++ b/tests/script/unique/db/delete.sim
@@ -69,7 +69,7 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
-sleep 1000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
diff --git a/tests/script/unique/db/replica_add12.sim b/tests/script/unique/db/replica_add12.sim
index ff0dffe0e2..89502ce419 100644
--- a/tests/script/unique/db/replica_add12.sim
+++ b/tests/script/unique/db/replica_add12.sim
@@ -148,25 +148,10 @@ print ========= step5
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
-sql select * from d1.t1
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d4.t4
-if $rows != 2 then
- return -1
-endi
+sql_error select * from d1.t1
+sql_error select * from d2.t2
+sql_error select * from d3.t3
+sql_error select * from d4.t4
print ===== insert data
@@ -175,26 +160,6 @@ sql_error insert into d2.t2 values(now, 3)
sql_error insert into d3.t3 values(now, 3)
sql_error insert into d4.t4 values(now, 3)
-sql select * from d1.t1
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d4.t4
-if $rows != 2 then
- return -1
-endi
-
print ========= step6
system sh/exec.sh -n dnode2 -s start
sleep 5000
@@ -234,25 +199,10 @@ sql_error insert into d2.t2 values(now, 3)
sql_error insert into d3.t3 values(now, 3)
sql_error insert into d4.t4 values(now, 3)
-sql select * from d1.t1
-if $rows != 3 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 3 then
- return -1
-endi
-
-sql select * from d3.t3
-if $rows != 3 then
- return -1
-endi
-
-sql select * from d4.t4
-if $rows != 3 then
- return -1
-endi
+sql_error select * from d1.t1
+sql_error select * from d2.t2
+sql_error select * from d3.t3
+sql_error select * from d4.t4
print ========= step7
system sh/exec.sh -n dnode3 -s start
diff --git a/tests/script/unique/db/replica_add13.sim b/tests/script/unique/db/replica_add13.sim
index 1b5a01a9fd..9cc1545bfa 100644
--- a/tests/script/unique/db/replica_add13.sim
+++ b/tests/script/unique/db/replica_add13.sim
@@ -171,7 +171,7 @@ print ========= step6
system sh/exec.sh -n dnode2 -s start
sleep 5000
system sh/exec.sh -n dnode3 -s stop -x SIGINT
-sleep 3000
+sleep 5000
sql insert into d1.t1 values(1589529000014, 4)
sql insert into d2.t2 values(1589529000024, 4)
@@ -206,7 +206,7 @@ print ========= step7
system sh/exec.sh -n dnode3 -s start
sleep 5000
system sh/exec.sh -n dnode4 -s stop -x SIGINT
-sleep 3000
+sleep 5000
sql insert into d1.t1 values(1589529000015, 5)
sql insert into d2.t2 values(1589529000025, 5)
@@ -237,7 +237,7 @@ print ========= step8
system sh/exec.sh -n dnode4 -s start
sleep 5000
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 3000
+sleep 5000
sql insert into d1.t1 values(1589529000016, 6)
sql insert into d2.t2 values(1589529000026, 6)
diff --git a/tests/script/unique/db/replica_reduce21.sim b/tests/script/unique/db/replica_reduce21.sim
index 9dc5abcd0d..a61cb84c11 100644
--- a/tests/script/unique/db/replica_reduce21.sim
+++ b/tests/script/unique/db/replica_reduce21.sim
@@ -118,7 +118,7 @@ endi
print ======== step9 stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 3000
+sleep 5000
sql insert into d5.t5 values(now, 3)
sql insert into d2.t2 values(now, 3)
diff --git a/tests/script/unique/db/replica_reduce32.sim b/tests/script/unique/db/replica_reduce32.sim
index bc08911a7c..5516009369 100644
--- a/tests/script/unique/db/replica_reduce32.sim
+++ b/tests/script/unique/db/replica_reduce32.sim
@@ -102,19 +102,15 @@ print ========= step4
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
-sql insert into d1.t1 values(now, 3) -x step1
-step1:
-sql insert into d2.t2 values(now, 3) -x step2
-step2:
-sql insert into d3.t3 values(now, 3) -x step3
-step3:
-sql insert into d4.t4 values(now, 3) -x step4
-step4:
+sql_error insert into d1.t1 values(now, 3)
+sql_error insert into d2.t2 values(now, 3)
+sql_error insert into d3.t3 values(now, 3)
+sql_error insert into d4.t4 values(now, 3)
-sql select * from d1.t1
-sql select * from d2.t2
-sql select * from d3.t3
-sql select * from d4.t4
+sql_error select * from d1.t1
+sql_error select * from d2.t2
+sql_error select * from d3.t3
+sql_error select * from d4.t4
print ========= step5
system sh/exec.sh -n dnode2 -s start
@@ -126,14 +122,10 @@ sleep 5000
sql reset query cache
sleep 1000
-sql insert into d1.t1 values(now, 3) -x step11
-step11:
-sql insert into d2.t2 values(now, 3) -x step21
-step21:
-sql insert into d3.t3 values(now, 3) -x step31
-step31:
-sql insert into d4.t4 values(now, 3) -x step41
-step41:
+sql_error insert into d1.t1 values(now, 3)
+sql_error insert into d2.t2 values(now, 3)
+sql_error insert into d3.t3 values(now, 3)
+sql_error insert into d4.t4 values(now, 3)
print ========= step6
system sh/exec.sh -n dnode3 -s start
diff --git a/tests/script/unique/dnode/balance2.sim b/tests/script/unique/dnode/balance2.sim
index 374e3dd5ba..e23562d8b4 100644
--- a/tests/script/unique/dnode/balance2.sim
+++ b/tests/script/unique/dnode/balance2.sim
@@ -84,6 +84,7 @@ if $data2_3 != 2 then
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
print ========== step3
sql create dnode $hostname4
@@ -210,6 +211,7 @@ if $data2_5 != 3 then
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/unique/dnode/balance3.sim b/tests/script/unique/dnode/balance3.sim
index 786b4404ba..a09fd7e4ea 100644
--- a/tests/script/unique/dnode/balance3.sim
+++ b/tests/script/unique/dnode/balance3.sim
@@ -99,7 +99,7 @@ if $data2_4 != 2 then
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-
+sleep 5000
print ========== step
sql create dnode $hostname5
system sh/exec.sh -n dnode5 -s start
@@ -239,6 +239,7 @@ if $data2_5 != 3 then
endi
system sh/exec.sh -n dnode3 -s stop -x SIGINT
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/unique/dnode/balancex.sim b/tests/script/unique/dnode/balancex.sim
index fdb20434ff..0d5da5bbf6 100644
--- a/tests/script/unique/dnode/balancex.sim
+++ b/tests/script/unique/dnode/balancex.sim
@@ -140,6 +140,7 @@ if $data2_3 != 3 then
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
sql reset query cache
sleep 1000
diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim
index 5c4d30e728..246808c56c 100644
--- a/tests/script/unique/dnode/remove1.sim
+++ b/tests/script/unique/dnode/remove1.sim
@@ -110,6 +110,7 @@ if $data2_2 != null then
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
print ========== step5
sql create dnode $hostname4
diff --git a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim
index 76c5fbb08e..cf9954c767 100644
--- a/tests/script/unique/dnode/remove2.sim
+++ b/tests/script/unique/dnode/remove2.sim
@@ -76,7 +76,7 @@ endi
print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sql drop dnode $hostname2
-sleep 4000
+sleep 5000
sql show dnodes
print dnode1 openVnodes $data2_1
diff --git a/tests/script/unique/dnode/vnode_clean.sim b/tests/script/unique/dnode/vnode_clean.sim
index 6ab63bd94f..e1ee1da2aa 100644
--- a/tests/script/unique/dnode/vnode_clean.sim
+++ b/tests/script/unique/dnode/vnode_clean.sim
@@ -101,7 +101,7 @@ endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ========== step5
-sleep 2000
+sleep 5000
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
@@ -222,7 +222,8 @@ if $data2_4 != 4 then
goto show9
endi
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop
+sleep 5000
print ========== step10
sql select * from d1.t1 order by t desc
diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim
index 19d1c48c3d..58f6609d15 100644
--- a/tests/script/unique/http/opentsdb.sim
+++ b/tests/script/unique/http/opentsdb.sim
@@ -75,7 +75,7 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":-2147482999}}],"failed":1,"success":0,"affected_rows":0}@ then
+if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":-2147482101}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
@@ -125,7 +125,7 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":-2147483135}}],"failed":1,"success":0,"affected_rows":0}@ then
+if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":-2147482782}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
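
The two expected-response updates in opentsdb.sim above only change the numeric `code` field inside the JSON returned by the HTTP plugin; the rest of each expected line is untouched. Those values are TDengine error codes printed as signed 32-bit integers, so when an expected code has to be refreshed it helps to view it in hex before searching the server's error-code header (assumed here to be taoserror.h with 0x8000xxxx-style constants; only the signed-to-unsigned conversion in the sketch below is guaranteed).

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Print the signed error codes from the expected JSON above in hex,
     * which is the form the server-side error constants are written in. */
    int main(void) {
      int32_t codes[] = {-2147483135, -2147482999, -2147482782, -2147482101};
      int n = (int)(sizeof(codes) / sizeof(codes[0]));
      for (int i = 0; i < n; ++i) {
        printf("%" PRId32 " -> 0x%08" PRIX32 "\n", codes[i], (uint32_t)codes[i]);
      }
      return 0;
    }

For example, -2147482101 prints as 0x8000060B, which is the value to look for when confirming the new expected code.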
diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim
index 08fe8d346a..d55e36d7fc 100644
--- a/tests/script/unique/mnode/mgmt22.sim
+++ b/tests/script/unique/mnode/mgmt22.sim
@@ -47,7 +47,7 @@ print should not drop master
print ============== step4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 5000
sql_error show mnodes
print error of no master
diff --git a/tests/script/unique/stable/dnode2_stop.sim b/tests/script/unique/stable/dnode2_stop.sim
index 0a801d9c7e..cb7df5a3cf 100644
--- a/tests/script/unique/stable/dnode2_stop.sim
+++ b/tests/script/unique/stable/dnode2_stop.sim
@@ -74,6 +74,7 @@ endi
sleep 100
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
print =============== step2
sql select count(*) from $mt -x step2
@@ -85,7 +86,7 @@ sql select count(tbcol) from $mt -x step21
step21:
system sh/exec.sh -n dnode2 -s start
-sleep 10000
+sleep 5000
print =============== step3
sql select count(tbcol) as c from $mt where ts <= 1519833840000
diff --git a/tests/script/windows/alter/metrics.sim b/tests/script/windows/alter/metrics.sim
index ce003a9836..323e150d41 100644
--- a/tests/script/windows/alter/metrics.sim
+++ b/tests/script/windows/alter/metrics.sim
@@ -368,9 +368,9 @@ endi
print ======== step9
print ======== step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql use d2
sql describe tb
diff --git a/tests/script/windows/alter/table.sim b/tests/script/windows/alter/table.sim
index 155debda0d..e6b5d69551 100644
--- a/tests/script/windows/alter/table.sim
+++ b/tests/script/windows/alter/table.sim
@@ -319,9 +319,9 @@ endi
print ======== step9
print ======== step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
+sleep 5000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 5000
sql use d1
sql describe tb
diff --git a/tests/test-all.sh b/tests/test-all.sh
index cee638e03c..0c91229120 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -24,65 +24,74 @@ GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
-echo "### run TSIM test case ###"
-cd script
+totalFailed=0
+totalPyFailed=0
-[ -f out.log ] && rm -f out.log
-if [ "$1" == "cron" ]; then
- echo "### run TSIM regression test ###"
- runSimCaseOneByOne regressionSuite.sim
-elif [ "$1" == "full" ]; then
- echo "### run TSIM full test ###"
- runSimCaseOneByOne fullGeneralSuite.sim
-else
- echo "### run TSIM smoke test ###"
- runSimCaseOneByOne basicSuite.sim
-fi
+current_dir=`pwd`
-totalSuccess=`grep 'success' out.log | wc -l`
-totalBasic=`grep success out.log | grep Suite | wc -l`
+if [ "$2" != "python" ]; then
+ echo "### run TSIM test case ###"
+ cd $current_dir/script
-if [ "$totalSuccess" -gt "0" ]; then
- totalSuccess=`expr $totalSuccess - $totalBasic`
-fi
+ [ -f out.log ] && rm -f out.log
+ if [ "$1" == "cron" ]; then
+ echo "### run TSIM regression test ###"
+ runSimCaseOneByOne regressionSuite.sim
+ elif [ "$1" == "full" ]; then
+ echo "### run TSIM full test ###"
+ runSimCaseOneByOne fullGeneralSuite.sim
+ elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
+ echo "### run TSIM smoke test ###"
+ runSimCaseOneByOne basicSuite.sim
+ fi
-echo -e "${GREEN} ### Total $totalSuccess TSIM case(s) succeed! ### ${NC}"
+ totalSuccess=`grep 'success' out.log | wc -l`
+ totalBasic=`grep success out.log | grep Suite | wc -l`
-totalFailed=`grep 'failed\|fault' out.log | wc -l`
+ if [ "$totalSuccess" -gt "0" ]; then
+ totalSuccess=`expr $totalSuccess - $totalBasic`
+ fi
+
+ echo -e "${GREEN} ### Total $totalSuccess TSIM case(s) succeed! ### ${NC}"
+
+ totalFailed=`grep 'failed\|fault' out.log | wc -l`
# echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}"
-if [ "$totalFailed" -ne "0" ]; then
- echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}"
+ if [ "$totalFailed" -ne "0" ]; then
+ echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}"
# exit $totalFailed
+ fi
fi
-echo "### run Python test case ###"
-cd ../pytest
+if [ "$2" != "sim" ]; then
+ echo "### run Python test case ###"
+ cd $current_dir/pytest
-[ -f pytest-out.log ] && rm -f pytest-out.log
+ [ -f pytest-out.log ] && rm -f pytest-out.log
-if [ "$1" == "cron" ]; then
- echo "### run Python regression test ###"
- runPyCaseOneByOne regressiontest.sh
-elif [ "$1" == "full" ]; then
- echo "### run Python full test ###"
- runPyCaseOneByOne fulltest.sh
-else
- echo "### run Python smoke test ###"
- runPyCaseOneByOne smoketest.sh
-fi
-totalPySuccess=`grep 'successfully executed' pytest-out.log | wc -l`
+ if [ "$1" == "cron" ]; then
+ echo "### run Python regression test ###"
+ runPyCaseOneByOne regressiontest.sh
+ elif [ "$1" == "full" ]; then
+ echo "### run Python full test ###"
+ runPyCaseOneByOne fulltest.sh
+ elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
+ echo "### run Python smoke test ###"
+ runPyCaseOneByOne smoketest.sh
+ fi
+ totalPySuccess=`grep 'successfully executed' pytest-out.log | wc -l`
-if [ "$totalPySuccess" -gt "0" ]; then
- echo -e "${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}"
-fi
+ if [ "$totalPySuccess" -gt "0" ]; then
+ echo -e "${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}"
+ fi
-totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`
-if [ "$totalPyFailed" -ne "0" ]; then
- echo -e "${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}"
+ totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`
+ if [ "$totalPyFailed" -ne "0" ]; then
+ echo -e "${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}"
# exit $totalPyFailed
+ fi
fi
exit $(($totalFailed + $totalPyFailed))
diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c
index 2540851634..f016e36d41 100644
--- a/tests/tsim/src/simMain.c
+++ b/tests/tsim/src/simMain.c
@@ -29,7 +29,7 @@ int main(int argc, char *argv[]) {
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
- strncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
} else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
strcpy(scriptFile, argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c
index fddda679c6..9fea1f115b 100644
--- a/tests/tsim/src/simParse.c
+++ b/tests/tsim/src/simParse.c
@@ -165,7 +165,7 @@ SScript *simBuildScriptObj(char *fileName) {
script->type = SIM_SCRIPT_TYPE_MAIN;
script->numOfLines = numOfLines;
- strncpy(script->fileName, fileName, MAX_FILE_NAME_LEN);
+ tstrncpy(script->fileName, fileName, sizeof(script->fileName));
script->optionBuffer = malloc(optionOffset);
memcpy(script->optionBuffer, optionBuffer, optionOffset);
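
The last two hunks replace strncpy with tstrncpy when copying file names, and the simParse.c call now sizes the copy from the destination field itself (sizeof(script->fileName)) rather than a shared constant. The motivation is the classic strncpy pitfall: when the source is at least as long as the limit, strncpy leaves the destination without a terminating NUL. The sketch below illustrates the bounded-copy behaviour the change relies on; the name copy_bounded is hypothetical, and the real tstrncpy in the TDengine util headers may differ in detail, so treat this as an illustration rather than the actual definition.

    #include <string.h>

    /* Illustrative stand-in for a bounded copy that always null-terminates,
     * unlike plain strncpy when strlen(src) >= size. */
    static void copy_bounded(char *dst, const char *src, size_t size) {
      if (size == 0) return;         /* no room to write anything safely */
      strncpy(dst, src, size - 1);   /* copy at most size-1 characters */
      dst[size - 1] = '\0';          /* guarantee termination */
    }

Sizing the call with sizeof on the destination, as the simParse.c hunk now does, keeps the bound correct even if the field's declared length changes later.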