diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 602b8cc430..c61402192d 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -449,10 +449,13 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscTrace("%p update table meta in local cache, continue to process sql and send corresponding subquery", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- if (pTableMetaInfo->pTableMeta == NULL){
- code = tscGetTableMeta(pSql, pTableMetaInfo);
+ code = tscGetTableMeta(pSql, pTableMetaInfo);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ } else {
assert(code == TSDB_CODE_SUCCESS);
}
+
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pTableMetaInfo->vgroupIndex >= 0 && pSql->param != NULL);
@@ -473,7 +476,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
- assert(code == TSDB_CODE_SUCCESS);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ } else {
+ assert(code == TSDB_CODE_SUCCESS);
+ }
// if failed to process sql, go to error handler
if ((code = tscProcessSql(pSql)) == TSDB_CODE_SUCCESS) {
@@ -483,7 +490,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
// // 1. table uid, 2. ip address
// code = tscSendMsgToServer(pSql);
// if (code == TSDB_CODE_SUCCESS) return;
-// }
} else {
tscTrace("%p continue parse sql after get table meta", pSql);
@@ -491,8 +497,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STMT_INSERT)) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
- assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->pTableMeta != NULL);
-
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ } else {
+ assert(code == TSDB_CODE_SUCCESS);
+ }
(*pSql->fp)(pSql->param, pSql, code);
return;
}
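
Note: all three hunks above apply the same pattern. tscGetTableMeta() either finds the table meta in the local cache and returns TSDB_CODE_SUCCESS, or it sends an asynchronous request and returns TSDB_CODE_TSC_ACTION_IN_PROGRESS, in which case the callback must simply return and wait to be re-entered. A stand-alone sketch of that control flow; the function and code names come from the hunks above, the stub, values and everything else are illustrative only:

#include <assert.h>
#include <stdio.h>

enum { TSDB_CODE_SUCCESS = 0, TSDB_CODE_TSC_ACTION_IN_PROGRESS = 1 };  /* illustrative values */

/* illustrative stub: the first call pretends the meta is not cached yet */
static int tscGetTableMetaStub(void) {
  static int cached = 0;
  if (!cached) { cached = 1; return TSDB_CODE_TSC_ACTION_IN_PROGRESS; }
  return TSDB_CODE_SUCCESS;
}

/* returns 1 when the caller must stop and wait for the async callback */
static int ensureTableMeta(void) {
  int code = tscGetTableMetaStub();
  if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
    return 1;                      /* request sent, the callback resumes later */
  }
  assert(code == TSDB_CODE_SUCCESS);
  return 0;                        /* meta available, continue processing */
}

int main(void) {
  printf("first call waits: %d\n", ensureTableMeta());   /* 1 */
  printf("second call runs: %d\n", ensureTableMeta());   /* 0 */
  return 0;
}
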
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index b05e82b39a..a0deaa519a 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -151,7 +151,6 @@ typedef struct SRateInfo {
double sum; // for sum/avg
} SRateInfo;
-
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable) {
if (!isValidDataType(dataType, dataBytes)) {
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 52240941f2..eaf9c21bfb 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -279,7 +279,8 @@ static void tscProcessCurrentUser(SSqlObj *pSql) {
pExpr->resType = TSDB_DATA_TYPE_BINARY;
char* vx = calloc(1, pExpr->resBytes);
- STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pSql->pTscObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, size);
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 372e81d737..60415a8d74 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -169,7 +169,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pPwd->n > TSDB_PASSWORD_LEN) {
+ if (pPwd->n >= TSDB_PASSWORD_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -232,7 +232,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pzName->n = strdequote(pzName->z);
strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
} else { // drop user
- if (pzName->n > TSDB_USER_LEN) {
+ if (pzName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -317,7 +317,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- if (pName->n > TSDB_USER_LEN) {
+ if (pName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -348,7 +348,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pToken->n >= TSDB_TABLE_NAME_LEN) {
+ if (!tscValidateTableNameLength(pToken->n)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -401,7 +401,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSQLToken* pName = &pUser->user;
SSQLToken* pPwd = &pUser->passwd;
- if (pName->n > TSDB_USER_LEN) {
+ if (pName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -543,7 +543,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
}
- pSql->cmd.parseFinished = true;
+ pSql->cmd.parseFinished = 1;
return tscBuildMsg[pCmd->command](pSql, pInfo);
}
@@ -1056,12 +1056,12 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL
totalLen += 1;
/* here we only check the table name length limitation */
- if (tableName->n > TSDB_TABLE_NAME_LEN) {
+ if (!tscValidateTableNameLength(tableName->n)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
} else { // pDB == NULL, the db prefix name is specified in tableName
/* the length limitation includes tablename + dbname + sep */
- if (tableName->n > (TSDB_TABLE_NAME_LEN - 1) + (TSDB_DB_NAME_LEN - 1) + sizeof(TS_PATH_DELIMITER)) {
+ if (tableName->n >= TSDB_TABLE_NAME_LEN + TSDB_DB_NAME_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -1078,7 +1078,7 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL
fullName[totalLen] = 0;
}
- return (totalLen <= TSDB_TABLE_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
+ return (totalLen < TSDB_TABLE_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
}
static void extractColumnNameFromString(tSQLExprItem* pItem) {
@@ -1402,9 +1402,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE};
- strcpy(colSchema.name, TSQL_TBNAME_L);
-
+ SSchema colSchema = tGetTableNameColumnSchema();
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true);
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1914,9 +1912,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSchema s = {0};
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- s.bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- s.type = TSDB_DATA_TYPE_BINARY;
- s.colId = TSDB_TBNAME_COLUMN_INDEX;
+ s = tGetTableNameColumnSchema();
} else {
s = pTagSchema[index.columnIndex];
}
@@ -2052,7 +2048,7 @@ int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColum
}
pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL;
- char tableName[TSDB_TABLE_ID_LEN + 1] = {0};
+ char tableName[TSDB_TABLE_ID_LEN] = {0};
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
@@ -2230,7 +2226,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- if (pCmd->payloadLen >= TSDB_TABLE_NAME_LEN) {
+ if (!tscValidateTableNameLength(pCmd->payloadLen)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -2241,7 +2237,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// show vnodes may be ip addr of dnode in payload
SSQLToken* pDnodeIp = &pShowInfo->prefix;
- if (pDnodeIp->n > TSDB_IPv4ADDR_LEN) { // ip addr is too long
+ if (pDnodeIp->n >= TSDB_IPv4ADDR_LEN) { // ip addr is too long
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -2861,7 +2857,7 @@ static int32_t tablenameListToString(tSQLExpr* pExpr, SStringBuilder* sb) {
taosStringBuilderAppendString(sb, TBNAME_LIST_SEP);
}
- if (pSub->val.nLen <= 0 || pSub->val.nLen > TSDB_TABLE_NAME_LEN) {
+ if (pSub->val.nLen <= 0 || !tscValidateTableNameLength(pSub->val.nLen)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -3616,7 +3612,7 @@ static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* ac
taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1);
}
- char idBuf[TSDB_TABLE_ID_LEN + 1] = {0};
+ char idBuf[TSDB_TABLE_ID_LEN] = {0};
int32_t xlen = strlen(segments[i]);
SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING};
@@ -4380,9 +4376,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- if (index.columnIndex < tscGetNumOfColumns(pTableMeta)) {
+ int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
+ if (index.columnIndex < numOfCols) {
return invalidSqlErrMsg(pQueryInfo->msg, msg10);
- } else if (index.columnIndex == 0) {
+ } else if (index.columnIndex == numOfCols) {
return invalidSqlErrMsg(pQueryInfo->msg, msg11);
}
@@ -5232,9 +5229,10 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
int16_t colIndex = pColIndex->colIndex;
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- type = TSDB_DATA_TYPE_BINARY;
- bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE; // todo extract method
- name = TSQL_TBNAME_L;
+ SSchema s = tGetTableNameColumnSchema();
+ type = s.type;
+ bytes = s.bytes;
+ name = s.name;
} else {
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
SSchema* tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
@@ -5602,7 +5600,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// get meter meta from mnode
- strncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, TSDB_TABLE_ID_LEN);
+ tstrncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, sizeof(pCreateTable->usingInfo.tagdata.name));
tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals;
int32_t code = tscGetTableMeta(pSql, pStableMeterMetaInfo);
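
Note: most comparisons in this file flip from "> LIMIT" to ">= LIMIT" because the *_LEN constants now count the terminating zero (see the taosdef.h hunk further down). A small stand-alone illustration of the convention, reusing the TSDB_USER_LEN value from that hunk; the helper name here is made up:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TSDB_USER_LEN 24   /* buffer size, terminating zero included */

/* a name of n characters fits into char[TSDB_USER_LEN] only if n < TSDB_USER_LEN */
static bool userNameLenOk(size_t n) { return n > 0 && n < TSDB_USER_LEN; }

int main(void) {
  printf("%d\n", userNameLenOk(strlen("root")));       /* 1: 4 characters fit        */
  printf("%d\n", userNameLenOk(TSDB_USER_LEN - 1));    /* 1: 23 characters, the max  */
  printf("%d\n", userNameLenOk(TSDB_USER_LEN));        /* 0: would need 25 bytes     */
  return 0;
}
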
diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c
index 88417addf6..439aa7c1de 100644
--- a/src/client/src/tscSchemaUtil.c
+++ b/src/client/src/tscSchemaUtil.c
@@ -50,14 +50,6 @@ int32_t tscGetNumOfColumns(const STableMeta* pTableMeta) {
SSchema *tscGetTableSchema(const STableMeta *pTableMeta) {
assert(pTableMeta != NULL);
-
-// if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
-// STableMeta* pSTableMeta = pTableMeta->pSTable;
-// assert (pSTableMeta != NULL);
-//
-// return pSTableMeta->schema;
-// }
-
return (SSchema*) pTableMeta->schema;
}
diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c
index b159ffc5a1..52a06277e3 100644
--- a/src/client/src/tscSecondaryMerge.c
+++ b/src/client/src/tscSecondaryMerge.c
@@ -217,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->numOfBuffer = numOfFlush;
pReducer->numOfVnode = numOfBuffer;
-
+
pReducer->pDesc = pDesc;
tscTrace("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);
@@ -604,7 +604,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
tOrderDescriptor *pOrderDesc = pReducer->pDesc;
SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo;
-
+
// no group by columns, all data belongs to one group
int32_t numOfCols = orderInfo->numOfCols;
if (numOfCols <= 0) {
@@ -627,7 +627,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
// only one row exists
int32_t index = orderInfo->pData[0];
int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset;
-
+
int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset);
return ret == 0;
}
@@ -1040,7 +1040,7 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
+
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
@@ -1182,7 +1182,7 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
-
+
int16_t functionId = pLocalReducer->pCtx[0].functionId;
// todo opt performance
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 33ab93d113..5913200ff6 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -209,6 +209,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
tscError("%p sql is already released", pSql->signature);
return;
}
+
if (pSql->signature != pSql) {
tscError("%p sql is already released, signature:%p", pSql, pSql->signature);
return;
@@ -217,10 +218,9 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
STscObj *pObj = pSql->pTscObj;
- // tscTrace("%p msg:%s is received from server", pSql, taosMsg[rpcMsg->msgType]);
- if (pObj->signature != pObj) {
- tscTrace("%p sql is already released or DB connection is closed, freed:%d pObj:%p signature:%p", pSql, pSql->freed,
+ if (pObj->signature != pObj || pSql->freed == 1) {
+ tscTrace("%p sqlObj needs to be released or DB connection is closed, freed:%d pObj:%p signature:%p", pSql, pSql->freed,
pObj, pObj->signature);
tscFreeSqlObj(pSql);
rpcFreeCont(rpcMsg->pCont);
@@ -375,7 +375,7 @@ int tscProcessSql(SSqlObj *pSql) {
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = NULL;
- uint16_t type = 0;
+ uint32_t type = 0;
if (pQueryInfo != NULL) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -424,7 +424,7 @@ void tscKillSTableQuery(SSqlObj *pSql) {
* sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
*/
pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
- //taosStopRpcConn(pSql->pSubs[i]->thandle);
+// taosStopRpcConn(pSql->pSubs[i]->thandle);
}
/*
@@ -1289,7 +1289,10 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) (pCmd->payload + tsRpcHeadSize);
pCmd->payloadLen = htonl(pUpdateMsg->head.contLen);
-
+ SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ tscSetDnodeIpList(pSql, &pTableMetaInfo->pTableMeta->vgroupInfo);
+
return TSDB_CODE_SUCCESS;
}
@@ -1705,8 +1708,9 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
- strncpy(pMsg, pTableMetaInfo->name, TSDB_TABLE_ID_LEN);
- pMsg += TSDB_TABLE_ID_LEN;
+ size_t size = sizeof(pTableMetaInfo->name);
+ tstrncpy(pMsg, pTableMetaInfo->name, size);
+ pMsg += size;
}
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP;
@@ -2220,9 +2224,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
if (pTableMetaInfo->pTableMeta) {
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
-
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
-// taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pMetricMeta), true);
if (isSuperTable) { // if it is a super table, reset whole query cache
tscTrace("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 5f2a8598db..6f043f186a 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -45,11 +45,11 @@ static bool validImpl(const char* str, size_t maxsize) {
}
static bool validUserName(const char* user) {
- return validImpl(user, TSDB_USER_LEN);
+ return validImpl(user, TSDB_USER_LEN - 1);
}
static bool validPassword(const char* passwd) {
- return validImpl(passwd, TSDB_PASSWORD_LEN);
+ return validImpl(passwd, TSDB_PASSWORD_LEN - 1);
}
SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
@@ -219,6 +219,11 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
sem_post(&pSql->rspSem);
}
+static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
+ SSqlObj* pSql = (SSqlObj*) tres;
+ sem_post(&pSql->rspSem);
+}
+
TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
@@ -369,11 +374,6 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
return (pQueryInfo->order.order == TSDB_ORDER_DESC) ? pRes->numOfRows : -pRes->numOfRows;
}
-static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
- SSqlObj* pSql = (SSqlObj*) tres;
- sem_post(&pSql->rspSem);
-}
-
TAOS_ROW taos_fetch_row(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) {
@@ -475,6 +475,42 @@ int taos_select_db(TAOS *taos, const char *db) {
return code;
}
+// send free message to vnode to free qhandle and corresponding resources in vnode
+static bool tscFreeQhandleInVnode(SSqlObj* pSql) {
+ SSqlCmd* pCmd = &pSql->cmd;
+ SSqlRes* pRes = &pSql->res;
+
+ SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+
+ if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && !tscIsTwoStageSTableQuery(pQueryInfo, 0) &&
+ (pCmd->command == TSDB_SQL_SELECT ||
+ pCmd->command == TSDB_SQL_SHOW ||
+ pCmd->command == TSDB_SQL_RETRIEVE ||
+ pCmd->command == TSDB_SQL_FETCH) &&
+ (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
+
+ pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
+ tscTrace("%p start to send msg to free qhandle in dnode, command:%s", pSql, sqlCmd[pCmd->command]);
+ pSql->freed = 1;
+ tscProcessSql(pSql);
+
+ // in case of sync model query, waits for response and then goes on
+// if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) {
+// sem_wait(&pSql->rspSem);
+
+// tscFreeSqlObj(pSql);
+// tscTrace("%p sqlObj is freed by app", pSql);
+// } else {
+ tscTrace("%p sqlObj will be freed while rsp received", pSql);
+// }
+
+ return true;
+ }
+
+ return false;
+}
+
void taos_free_result(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
tscTrace("%p start to free result", res);
@@ -484,10 +520,8 @@ void taos_free_result(TAOS_RES *res) {
return;
}
- SSqlRes *pRes = &pSql->res;
- SSqlCmd *pCmd = &pSql->cmd;
-
// The semaphore can not be changed while freeing async sub query objects.
+ SSqlRes *pRes = &pSql->res;
if (pRes == NULL || pRes->qhandle == 0) {
tscTrace("%p SqlObj is freed by app, qhandle is null", pSql);
tscFreeSqlObj(pSql);
@@ -502,31 +536,10 @@ void taos_free_result(TAOS_RES *res) {
}
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- /*
- * If the query process is cancelled by user in stable query, tscProcessSql should not be called
- * for each subquery. Because the failure of execution tsProcessSql may trigger the callback function
- * be executed, and the retry efforts may result in double free the resources, e.g.,SRetrieveSupport
- */
- if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false &&
- (pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_SHOW ||
- pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_FETCH) &&
- (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
- pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
-
- tscTrace("%p start to send msg to free qhandle in dnode, command:%s", pSql, sqlCmd[pCmd->command]);
- pSql->freed = 1;
- tscProcessSql(pSql);
-
- // in case of sync model query, waits for response and then goes on
- if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) {
- sem_wait(&pSql->rspSem);
- }
+ if (!tscFreeQhandleInVnode(pSql)) {
+ tscFreeSqlObj(pSql);
+ tscTrace("%p sqlObj is freed by app", pSql);
}
-
- tscFreeSqlObj(pSql);
- tscTrace("%p sql result is freed by app", pSql);
}
// todo should not be used in async query
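
Note: after this refactor, taos_free_result() first lets tscFreeQhandleInVnode() send a free-qhandle message for still-open queries and frees the SSqlObj locally only when no message is needed; otherwise pSql->freed is set to 1 and the object is released when the response arrives (see the tscServer.c hunk above). From the application side the call sequence is unchanged. A minimal sketch of a synchronous caller using the public API seen in this file; taos_connect() and taos_close() are the usual client entry points and are not part of this hunk, and the connection parameters are placeholders:

#include <stdio.h>
#include "taos.h"   /* TDengine client header */

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn, "show databases");
  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    /* consume the row ... */
  }

  /* with this patch taos_free_result() may first send a free-qhandle
   * message to the dnode before the SSqlObj itself is released */
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
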
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index ddc41e52e5..3bc931a855 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -1662,12 +1662,13 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
*/
if (code != TSDB_CODE_SUCCESS) {
if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) {
- tscTrace("%p sub:%p reach the max retry times, set global code:%d", pParentSql, pSql, code);
+ tscTrace("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code));
atomic_val_compare_exchange_32(&pState->code, 0, code);
} else { // does not reach the maximum retry time, go on
tscTrace("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
SSqlObj *pNew = tscCreateSqlObjForSubquery(pParentSql, trsupport, pSql);
+
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vgId:%d, orderOfSub:%d",
trsupport->pParentSqlObj, pSql, pVgroup->vgId, trsupport->subqueryIndex);
@@ -1677,7 +1678,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
} else {
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
assert(pNewQueryInfo->pTableMetaInfo[0]->pTableMeta != NULL);
-
+
+ taos_free_result(pSql);
tscProcessSql(pNew);
return;
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 2701d2b572..57634e73fd 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -134,24 +134,6 @@ void tscGetDBInfoFromMeterId(char* tableId, char* db) {
db[0] = 0;
}
-//STableIdInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx) {
-// if (pSidList == NULL) {
-// tscError("illegal sidlist");
-// return 0;
-// }
-//
-// if (idx < 0 || idx >= pSidList->numOfSids) {
-// int32_t sidRange = (pSidList->numOfSids > 0) ? (pSidList->numOfSids - 1) : 0;
-//
-// tscError("illegal sidIdx:%d, reset to 0, sidIdx range:%d-%d", idx, 0, sidRange);
-// idx = 0;
-// }
-//
-// assert(pSidList->pSidExtInfoList[idx] >= 0);
-//
-// return (STableIdInfo*)(pSidList->pSidExtInfoList[idx] + (char*)pSidList);
-//}
-
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
@@ -176,8 +158,7 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
return false;
}
- if (((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) &&
- pQueryInfo->command == TSDB_SQL_SELECT) {
+ if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->command == TSDB_SQL_SELECT) {
return UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
}
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 0e57c7317e..ea0eb9ff29 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -69,8 +69,8 @@ typedef struct {
int version; // version
int numOfCols; // Number of columns appended
int tlen; // maximum length of a SDataRow without the header part
- int16_t flen; // First part length in a SDataRow after the header part
- int16_t vlen; // pure value part length, excluded the overhead
+ uint16_t flen; // First part length in a SDataRow after the header part
+ uint16_t vlen; // pure value part length, excluded the overhead
STColumn columns[];
} STSchema;
@@ -107,8 +107,8 @@ typedef struct {
int tCols;
int nCols;
int tlen;
- int16_t flen;
- int16_t vlen;
+ uint16_t flen;
+ uint16_t vlen;
int version;
STColumn *columns;
} STSchemaBuilder;
@@ -125,16 +125,16 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
* |<--------------------+--------------------------- len ---------------------------------->|
* |<-- Head -->|<--------- flen -------------->| |
* +---------------------+---------------------------------+---------------------------------+
- * | int16_t | int16_t | | |
+ * | uint16_t | int16_t | | |
* +----------+----------+---------------------------------+---------------------------------+
* | len | sversion | First part | Second part |
* +----------+----------+---------------------------------+---------------------------------+
*/
typedef void *SDataRow;
-#define TD_DATA_ROW_HEAD_SIZE sizeof(int16_t)*2
+#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
-#define dataRowLen(r) (*(int16_t *)(r))
+#define dataRowLen(r) (*(uint16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r)))
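
Note: widening len/flen/vlen to uint16_t raises the representable data-row size from 32767 to 65535 bytes, which matches the new TSDB_MAX_BYTES_PER_ROW value in the taosdef.h hunk below. A tiny self-contained check using the same head-size and accessor definitions, taken from this header (outer parentheses added for safety):

#include <stdint.h>
#include <stdio.h>

#define POINTER_SHIFT(p, b)   ((void *)((char *)(p) + (b)))
#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
#define dataRowLen(r)         (*(uint16_t *)(r))
#define dataRowVersion(r)     (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))

int main(void) {
  uint16_t head[2] = {0};          /* 4 aligned bytes: len + sversion */
  void *row = head;

  dataRowLen(row) = 65535;         /* fits now; an int16_t len topped out at 32767 */
  dataRowVersion(row) = 1;

  printf("head=%zu len=%u sversion=%d\n",
         TD_DATA_ROW_HEAD_SIZE, (unsigned)dataRowLen(row), dataRowVersion(row));
  return 0;
}
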
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 39144c3083..53d821b3d8 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -53,6 +53,7 @@ extern int64_t tsMsPerDay[3];
extern char tsFirst[];
extern char tsSecond[];
+extern char tsLocalFqdn[];
extern char tsLocalEp[];
extern uint16_t tsServerPort;
extern uint16_t tsDnodeShellPort;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 810d7f492c..d2008c9ff8 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -23,5 +23,8 @@ void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
+SSchema tGetTableNameColumnSchema();
+
+bool tscValidateTableNameLength(size_t len);
#endif // TDENGINE_NAME_H
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 12754b164b..86fc6deb1b 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -65,6 +65,7 @@ int64_t tsMsPerDay[] = {86400000L, 86400000000L, 86400000000000L};
char tsFirst[TSDB_EP_LEN] = {0};
char tsSecond[TSDB_EP_LEN] = {0};
char tsArbitrator[TSDB_EP_LEN] = {0};
+char tsLocalFqdn[TSDB_FQDN_LEN] = {0};
char tsLocalEp[TSDB_EP_LEN] = {0}; // Local End Point, hostname:port
uint16_t tsServerPort = 6030;
uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035]
@@ -305,6 +306,16 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "fqdn";
+ cfg.ptr = tsLocalFqdn;
+ cfg.valType = TAOS_CFG_VTYPE_STRING;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 0;
+ cfg.ptrLength = TSDB_FQDN_LEN;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// port
cfg.option = "serverPort";
cfg.ptr = &tsServerPort;
@@ -718,7 +729,7 @@ static void doInitGlobalConfig() {
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
- cfg.ptrLength = TSDB_USER_LEN;
+ cfg.ptrLength = TSDB_USER_LEN - 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -728,7 +739,7 @@ static void doInitGlobalConfig() {
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0;
cfg.maxValue = 0;
- cfg.ptrLength = TSDB_PASSWORD_LEN;
+ cfg.ptrLength = TSDB_PASSWORD_LEN - 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -1251,9 +1262,14 @@ bool taosCheckGlobalCfg() {
taosSetAllDebugFlag();
}
- taosGetFqdn(tsLocalEp);
- sprintf(tsLocalEp + strlen(tsLocalEp), ":%d", tsServerPort);
- uPrint("localEp is %s", tsLocalEp);
+ if (tsLocalFqdn[0] == 0) {
+ taosGetFqdn(tsLocalFqdn);
+ }
+
+ strcpy(tsLocalEp, tsLocalFqdn);
+
+ snprintf(tsLocalEp + strlen(tsLocalEp), sizeof(tsLocalEp) - strlen(tsLocalEp), ":%d", tsServerPort);
+ uPrint("localEp is: %s", tsLocalEp);
if (tsFirst[0] == 0) {
strcpy(tsFirst, tsLocalEp);
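
Note: with this change the FQDN can be set explicitly through the new "fqdn" option and taosGetFqdn() is only the fallback; the local end point is then the FQDN plus ":port". A stand-alone sketch of that concatenation with a correctly bounded snprintf; the buffer sizes here are illustrative, the real ones live in taosdef.h:

#include <stdio.h>

#define TSDB_FQDN_LEN 128                 /* illustrative size */
#define TSDB_EP_LEN   (TSDB_FQDN_LEN + 6) /* fqdn + ':' + 5-digit port */

/* writes "fqdn:port" into ep without overrunning it */
static void buildLocalEp(char *ep, size_t epSize, const char *fqdn, unsigned port) {
  snprintf(ep, epSize, "%s:%u", fqdn, port);
}

int main(void) {
  char ep[TSDB_EP_LEN];
  buildLocalEp(ep, sizeof(ep), "node1.example.com", 6030);
  printf("localEp is: %s\n", ep);
  return 0;
}
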
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index d2f5dfc4f7..2514ed26e5 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -37,3 +37,16 @@ char* extractDBName(const char* tableId, char* name) {
return strncpy(name, &tableId[offset1 + 1], len);
}
+
+SSchema tGetTableNameColumnSchema() {
+ SSchema s = {0};
+ s.bytes = TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE;
+ s.type = TSDB_DATA_TYPE_BINARY;
+ s.colId = TSDB_TBNAME_COLUMN_INDEX;
+ strncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
+ return s;
+}
+
+bool tscValidateTableNameLength(size_t len) {
+ return len < TSDB_TABLE_NAME_LEN;
+}
\ No newline at end of file
diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py
index 10df10b31f..c26e5c0967 100644
--- a/src/connector/python/linux/python2/taos/cinterface.py
+++ b/src/connector/python/linux/python2/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py
index 06ade4fc35..6a9c5bfcef 100644
--- a/src/connector/python/windows/python2/taos/cinterface.py
+++ b/src/connector/python/windows/python2/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py
index c6218fe9d4..fa7124431c 100644
--- a/src/connector/python/windows/python3/taos/cinterface.py
+++ b/src/connector/python/windows/python3/taos/cinterface.py
@@ -130,9 +130,9 @@ _CONVERT_FUNC = {
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 64),
- ('bytes', ctypes.c_short),
- ('type', ctypes.c_char)]
+ _fields_ = [('name', ctypes.c_char * 65),
+ ('type', ctypes.c_char),
+ ('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
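
Note: the three connector hunks reorder the ctypes fields so that TaosField again mirrors the C-side TAOS_FIELD after TSDB_COL_NAME_LEN grew to 65 (taosdef.h hunk below). The C layout implied by the Python declaration is roughly the following; this is inferred from the hunks, not copied from taos.h:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* sketch of the field descriptor the connectors now expect:
 * a 65-byte name, one type byte, then a 2-byte column width */
typedef struct taosField {
  char    name[65];   /* TSDB_COL_NAME_LEN */
  int8_t  type;       /* column data type  */
  int16_t bytes;      /* column width in bytes */
} TAOS_FIELD;

int main(void) {
  printf("name@%zu type@%zu bytes@%zu\n",
         offsetof(TAOS_FIELD, name), offsetof(TAOS_FIELD, type), offsetof(TAOS_FIELD, bytes));
  return 0;
}
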
diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c
index 789a98f78e..d35e82fa47 100644
--- a/src/dnode/src/dnodeMgmt.c
+++ b/src/dnode/src/dnodeMgmt.c
@@ -572,6 +572,7 @@ static void dnodeSaveMnodeInfos() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
+ fflush(fp);
fclose(fp);
free(content);
@@ -694,6 +695,7 @@ static void dnodeSaveDnodeCfg() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
+ fflush(fp);
fclose(fp);
free(content);
diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c
index 851e895f83..bd5f3208ee 100644
--- a/src/dnode/src/dnodePeer.c
+++ b/src/dnode/src/dnodePeer.c
@@ -89,6 +89,8 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
.contLen = 0
};
+ if (pMsg->pCont == NULL) return;
+
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) {
rspMsg.code = TSDB_CODE_RPC_NOT_READY;
rpcSendResponse(&rspMsg);
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index 117059ae66..4252e63f8d 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -115,6 +115,8 @@ void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
.contLen = 0
};
+ if (pMsg->pCont == NULL) return;
+
if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_RUNING) {
dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]);
rpcMsg.code = TSDB_CODE_RPC_NOT_READY;
@@ -145,7 +147,7 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
if (code != TSDB_CODE_RPC_NOT_READY) return code;
SDMAuthMsg *pMsg = rpcMallocCont(sizeof(SDMAuthMsg));
- tstrncpy(pMsg->user, user, TSDB_USER_LEN);
+ tstrncpy(pMsg->user, user, sizeof(pMsg->user));
SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pMsg;
diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c
index 3efa1c728d..2fa738e480 100644
--- a/src/dnode/src/dnodeSystem.c
+++ b/src/dnode/src/dnodeSystem.c
@@ -29,7 +29,7 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
- if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
printf("config file path overflow");
exit(EXIT_FAILURE);
}
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index 2f9e9a0af9..cd18ae6dda 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -26,13 +26,6 @@
#include "dnodeVRead.h"
#include "vnode.h"
-typedef struct {
- SRspRet rspRet;
- void *pCont;
- int32_t contLen;
- SRpcMsg rpcMsg;
-} SReadMsg;
-
typedef struct {
pthread_t thread; // thread
int32_t workerId; // worker ID
@@ -218,7 +211,7 @@ static void *dnodeProcessReadQueue(void *param) {
}
dTrace("%p, msg:%s will be processed in vread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]);
- int32_t code = vnodeProcessRead(pVnode, pReadMsg->rpcMsg.msgType, pReadMsg->pCont, pReadMsg->contLen, &pReadMsg->rspRet);
+ int32_t code = vnodeProcessRead(pVnode, pReadMsg);
dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
taosFreeQitem(pReadMsg);
}
diff --git a/src/inc/query.h b/src/inc/query.h
index cdadd4759f..10ee0249b6 100644
--- a/src/inc/query.h
+++ b/src/inc/query.h
@@ -77,6 +77,12 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp** pRsp, int32_t* co
*/
bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
+/**
+ * kill current ongoing query and free query handle automatically
+ * @param qinfo
+ */
+int32_t qKillQuery(qinfo_t qinfo);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 251e3a9e40..7490de90d0 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -186,30 +186,31 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_BINARY_OP_MULTIPLY 14
#define TSDB_BINARY_OP_DIVIDE 15
#define TSDB_BINARY_OP_REMAINDER 16
-#define TSDB_USERID_LEN 9
#define TS_PATH_DELIMITER_LEN 1
-#define TSDB_METER_ID_LEN_MARGIN 8
-#define TSDB_TABLE_ID_LEN (TSDB_DB_NAME_LEN+TSDB_TABLE_NAME_LEN+2*TS_PATH_DELIMITER_LEN+TSDB_USERID_LEN+TSDB_METER_ID_LEN_MARGIN) //TSDB_DB_NAME_LEN+TSDB_TABLE_NAME_LEN+2*strlen(TS_PATH_DELIMITER)+strlen(USERID)
#define TSDB_UNI_LEN 24
#define TSDB_USER_LEN TSDB_UNI_LEN
-#define TSDB_ACCT_LEN TSDB_UNI_LEN
+// ACCOUNT is a 32 bit positive integer
+// this is the length of its string representation
+// including the terminator zero
+#define TSDB_ACCT_LEN 11
#define TSDB_PASSWORD_LEN TSDB_UNI_LEN
#define TSDB_MAX_COLUMNS 1024
#define TSDB_MIN_COLUMNS 2 //PRIMARY COLUMN(timestamp) + other columns
#define TSDB_NODE_NAME_LEN 64
-#define TSDB_TABLE_NAME_LEN 193
+#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 33
+#define TSDB_TABLE_ID_LEN (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN)
#define TSDB_COL_NAME_LEN 65
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_SQL_SHOW_LEN 256
#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 8mb
-#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 64
-#define TSDB_MAX_TAGS_LEN 65536
+#define TSDB_MAX_BYTES_PER_ROW 65535
+#define TSDB_MAX_TAGS_LEN 65535
#define TSDB_MAX_TAGS 128
#define TSDB_AUTH_LEN 16
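
Note: the comment above TSDB_ACCT_LEN encodes simple arithmetic. The largest 32-bit positive integer, 2147483647, prints as 10 characters, and one more byte is needed for the terminating zero, hence 11. A short check:

#include <stdio.h>

int main(void) {
  char buf[16];
  int digits = snprintf(buf, sizeof(buf), "%d", 2147483647);    /* "2147483647" */
  printf("digits=%d, buffer needed=%d\n", digits, digits + 1);  /* 10 and 11 */
  return 0;
}
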
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 6a0219d6fc..1198097895 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -243,13 +243,13 @@ typedef struct {
uint64_t uid;
uint64_t superTableUid;
uint64_t createdTime;
- char tableId[TSDB_TABLE_ID_LEN + 1];
- char superTableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
+ char superTableId[TSDB_TABLE_ID_LEN];
char data[];
} SMDCreateTableMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
char db[TSDB_DB_NAME_LEN];
int8_t igExists;
int8_t getMeta;
@@ -262,12 +262,12 @@ typedef struct {
} SCMCreateTableMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
int8_t igNotExists;
} SCMDropTableMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
char db[TSDB_DB_NAME_LEN];
int16_t type; /* operation type */
int16_t numOfCols; /* number of schema */
@@ -292,7 +292,7 @@ typedef struct {
typedef struct {
char clientVersion[TSDB_VERSION_LEN];
char msgVersion[TSDB_VERSION_LEN];
- char db[TSDB_TABLE_ID_LEN + 1];
+ char db[TSDB_TABLE_ID_LEN];
} SCMConnectMsg;
typedef struct {
@@ -321,18 +321,18 @@ typedef struct {
} SAcctCfg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
SAcctCfg cfg;
} SCMCreateAcctMsg, SCMAlterAcctMsg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
} SCMDropUserMsg, SCMDropAcctMsg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
int8_t privilege;
int8_t flag;
} SCMCreateUserMsg, SCMAlterUserMsg;
@@ -342,14 +342,14 @@ typedef struct {
int32_t vgId;
int32_t sid;
uint64_t uid;
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
} SMDDropTableMsg;
typedef struct {
int32_t contLen;
int32_t vgId;
uint64_t uid;
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
} SMDDropSTableMsg;
typedef struct {
@@ -501,8 +501,7 @@ typedef struct {
} SVnodeLoad;
typedef struct {
- char acct[TSDB_USER_LEN + 1];
- char db[TSDB_DB_NAME_LEN];
+ char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
int32_t cacheBlockSize; //MB
int32_t totalBlocks;
int32_t maxTables;
@@ -521,7 +520,7 @@ typedef struct {
} SCMCreateDbMsg, SCMAlterDbMsg;
typedef struct {
- char db[TSDB_TABLE_ID_LEN + 1];
+ char db[TSDB_TABLE_ID_LEN];
uint8_t ignoreNotExists;
} SCMDropDbMsg, SCMUseDbMsg;
@@ -612,7 +611,7 @@ typedef struct {
} SMDCreateVnodeMsg;
typedef struct {
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
int16_t createFlag;
char tags[];
} SCMTableInfoMsg;
@@ -639,7 +638,7 @@ typedef struct {
typedef struct STableMetaMsg {
int32_t contLen;
- char tableId[TSDB_TABLE_ID_LEN + 1]; // table id
+ char tableId[TSDB_TABLE_ID_LEN]; // table id
uint8_t numOfTags;
uint8_t precision;
uint8_t tableType;
@@ -660,7 +659,7 @@ typedef struct SMultiTableMeta {
typedef struct {
int32_t dataLen;
- char name[TSDB_TABLE_ID_LEN + 1];
+ char name[TSDB_TABLE_ID_LEN];
char data[TSDB_MAX_TAGS_LEN];
} STagData;
@@ -746,15 +745,15 @@ typedef struct {
uint64_t uid;
uint64_t stime; // stream starting time
int32_t status;
- char tableId[TSDB_TABLE_ID_LEN + 1];
+ char tableId[TSDB_TABLE_ID_LEN];
} SMDAlterStreamMsg;
typedef struct {
- char user[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
char spi;
char encrypt;
- char secret[TSDB_KEY_LEN + 1];
- char ckey[TSDB_KEY_LEN + 1];
+ char secret[TSDB_KEY_LEN];
+ char ckey[TSDB_KEY_LEN];
} SDMAuthMsg, SDMAuthRsp;
#pragma pack(pop)
diff --git a/src/inc/trpc.h b/src/inc/trpc.h
index 5c5c77c251..748fad0e62 100644
--- a/src/inc/trpc.h
+++ b/src/inc/trpc.h
@@ -83,6 +83,7 @@ void rpcSendResponse(const SRpcMsg *pMsg);
void rpcSendRedirectRsp(void *pConn, const SRpcIpSet *pIpSet);
int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pReq, SRpcMsg *pRsp);
+int rpcReportProgress(void *pConn, char *pCont, int contLen);
#ifdef __cplusplus
}
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 069f99263d..0da1f51e27 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -34,6 +34,13 @@ typedef struct {
void *qhandle; //used by query and retrieve msg
} SRspRet;
+typedef struct {
+ SRspRet rspRet;
+ void *pCont;
+ int32_t contLen;
+ SRpcMsg rpcMsg;
+} SReadMsg;
+
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeDrop(int32_t vgId);
int32_t vnodeOpen(int32_t vgId, char *rootDir);
@@ -52,7 +59,7 @@ void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
void vnodeBuildStatusMsg(void * param);
-int32_t vnodeProcessRead(void *pVnode, int msgType, void *pCont, int32_t contLen, SRspRet *ret);
+int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);
#ifdef __cplusplus
}
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
index 439ca6edad..1a75a2aa85 100644
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ -97,7 +97,7 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
}
} else if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
- if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
}
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index 7d035126c0..829ceb9e5f 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -81,7 +81,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
fprintf(stderr, "Invalid path %s\n", arg);
return -1;
}
- if (strlen(full_path.we_wordv[0]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(full_path.we_wordv[0]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", full_path.we_wordv[0], TSDB_FILENAME_LEN - 1);
wordfree(&full_path);
return -1;
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index cf96bce5a8..8a7996d682 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -77,7 +77,7 @@ void shellParseArgument(int argc, char *argv[], struct arguments *arguments) {
}
} else if (strcmp(argv[i], "-c") == 0) {
if (i < argc - 1) {
- if (strlen(argv[++i]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
}
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 6cb57ef0d5..ee792c5116 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -177,8 +177,8 @@ typedef struct SDumpArguments {
char *password;
uint16_t port;
// output file
- char output[TSDB_FILENAME_LEN + 1];
- char input[TSDB_FILENAME_LEN + 1];
+ char output[TSDB_FILENAME_LEN];
+ char input[TSDB_FILENAME_LEN];
char *encode;
// dump unit option
bool all_databases;
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index 14cd405763..2baf28f88f 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -154,8 +154,8 @@ typedef struct {
} SDbCfg;
typedef struct SDbObj {
- char name[TSDB_DB_NAME_LEN];
- char acct[TSDB_USER_LEN + 1];
+ char name[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
+ char acct[TSDB_USER_LEN];
int64_t createdTime;
int32_t cfgVersion;
SDbCfg cfg;
@@ -172,9 +172,9 @@ typedef struct SDbObj {
} SDbObj;
typedef struct SUserObj {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
- char acct[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
+ char acct[TSDB_USER_LEN];
int64_t createdTime;
int8_t superAuth;
int8_t writeAuth;
@@ -203,8 +203,8 @@ typedef struct {
} SAcctInfo;
typedef struct SAcctObj {
- char user[TSDB_USER_LEN + 1];
- char pass[TSDB_KEY_LEN + 1];
+ char user[TSDB_USER_LEN];
+ char pass[TSDB_KEY_LEN];
SAcctCfg cfg;
int32_t acctId;
int64_t createdTime;
diff --git a/src/mnode/inc/mnodeProfile.h b/src/mnode/inc/mnodeProfile.h
index 30745db035..c9f7cc8e2a 100644
--- a/src/mnode/inc/mnodeProfile.h
+++ b/src/mnode/inc/mnodeProfile.h
@@ -22,7 +22,7 @@ extern "C" {
#include "mnodeDef.h"
typedef struct {
- char user[TSDB_USER_LEN + 1];
+ char user[TSDB_USER_LEN];
int8_t killed;
uint16_t port;
uint32_t ip;
diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c
index bfb92096ec..4720fb0ddc 100644
--- a/src/mnode/src/mnodeProfile.c
+++ b/src/mnode/src/mnodeProfile.c
@@ -97,7 +97,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
.connId = connId,
.stime = taosGetTimestampMs()
};
- tstrncpy(connObj.user, user, TSDB_USER_LEN);
+ tstrncpy(connObj.user, user, sizeof(connObj.user));
char key[10];
sprintf(key, "%u", connId);
@@ -222,7 +222,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi
SConnObj *pConnObj = NULL;
int32_t cols = 0;
char * pWrite;
- char ipStr[TSDB_IPv4ADDR_LEN + 7];
+ char ipStr[TSDB_IPv4ADDR_LEN + 6];
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
@@ -235,12 +235,14 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pConnObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- snprintf(ipStr, TSDB_IPv4ADDR_LEN + 6, "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, TSDB_IPv4ADDR_LEN + 6);
+ snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
+ size = sizeof(ipStr);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -342,7 +344,7 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
SConnObj *pConnObj = NULL;
int32_t cols = 0;
char * pWrite;
- char ipStr[TSDB_IPv4ADDR_LEN + 7];
+ char ipStr[TSDB_IPv4ADDR_LEN + 6];
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
@@ -358,12 +360,14 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pConnObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- snprintf(ipStr, TSDB_IPv4ADDR_LEN + 6, "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, TSDB_IPv4ADDR_LEN + 6);
+ snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
+ size = sizeof(ipStr);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -463,7 +467,7 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v
SConnObj *pConnObj = NULL;
int32_t cols = 0;
char * pWrite;
- char ipStr[TSDB_IPv4ADDR_LEN + 7];
+ char ipStr[TSDB_IPv4ADDR_LEN + 6];
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextConn(pShow->pIter, &pConnObj);
@@ -479,12 +483,14 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, TSDB_USER_LEN);
+ size_t size = sizeof(pConnObj->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- snprintf(ipStr, TSDB_IPv4ADDR_LEN + 6, "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, TSDB_IPv4ADDR_LEN + 6);
+ snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port);
+ size = sizeof(ipStr);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index a13c6ca129..659ac159a8 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -451,7 +451,7 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
pthread_mutex_unlock(&pTable->mutex);
- sdbTrace("table:%s, insert record:%s to hash, rowSize:%d vnumOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
+ sdbTrace("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion());
(*pTable->insertFp)(pOper);
@@ -475,7 +475,7 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
pTable->numOfRows--;
pthread_mutex_unlock(&pTable->mutex);
- sdbTrace("table:%s, delete record:%s from hash, numOfRows:%d" PRId64 "version:%" PRIu64, pTable->tableName,
+ sdbTrace("table:%s, delete record:%s from hash, numOfRows:%" PRId64 "version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index a2ddd9893f..0e255d011b 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -201,7 +201,7 @@ static int32_t mnodeChildTableActionEncode(SSdbOper *pOper) {
assert(pTable != NULL && pOper->rowData != NULL);
int32_t len = strlen(pTable->info.tableId);
- if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID;
+ if (len >= TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID;
memcpy(pOper->rowData, pTable->info.tableId, len);
memset(pOper->rowData + len, 0, 1);
@@ -232,7 +232,7 @@ static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) {
if (pTable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
int32_t len = strlen(pOper->rowData);
- if (len > TSDB_TABLE_ID_LEN) {
+ if (len >= TSDB_TABLE_ID_LEN) {
free(pTable);
return TSDB_CODE_MND_INVALID_TABLE_ID;
}
@@ -453,7 +453,7 @@ static int32_t mnodeSuperTableActionEncode(SSdbOper *pOper) {
assert(pOper->pObj != NULL && pOper->rowData != NULL);
int32_t len = strlen(pStable->info.tableId);
- if (len > TSDB_TABLE_ID_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID;
+ if (len >= TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID;
memcpy(pOper->rowData, pStable->info.tableId, len);
memset(pOper->rowData + len, 0, 1);
@@ -477,7 +477,7 @@ static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) {
if (pStable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
int32_t len = strlen(pOper->rowData);
- if (len > TSDB_TABLE_ID_LEN){
+ if (len >= TSDB_TABLE_ID_LEN){
free(pStable);
return TSDB_CODE_MND_INVALID_TABLE_ID;
}
@@ -1078,8 +1078,9 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
- pShow->bytes[cols] = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema tbnameSchema = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = tbnameSchema.bytes;
+ pSchema[cols].type = tbnameSchema.type;
strcpy(pSchema[cols].name, "name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -1249,12 +1250,12 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns);
pMeta->tableType = pTable->info.type;
pMeta->contLen = sizeof(STableMetaMsg) + mnodeSetSchemaFromSuperTable(pMeta->schema, pTable);
- strncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_ID_LEN);
+ tstrncpy(pMeta->tableId, pTable->info.tableId, sizeof(pMeta->tableId));
+ pMsg->rpcRsp.len = pMeta->contLen;
pMeta->contLen = htons(pMeta->contLen);
pMsg->rpcRsp.rsp = pMeta;
- pMsg->rpcRsp.len = pMeta->contLen;
mTrace("stable:%s, uid:%" PRIu64 " table meta is retrieved", pTable->info.tableId, pTable->uid);
return TSDB_CODE_SUCCESS;
@@ -1769,7 +1770,8 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
- tstrncpy(pCreateMsg->tableId, pInfo->tableId, sizeof(pInfo->tableId));
+ size_t size = sizeof(pInfo->tableId);
+ tstrncpy(pCreateMsg->tableId, pInfo->tableId, size);
tstrncpy(pCreateMsg->db, pMsg->pDb->name, sizeof(pCreateMsg->db));
pCreateMsg->igExists = 1;
pCreateMsg->getMeta = 1;
@@ -2032,7 +2034,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
pMultiMeta->numOfTables = 0;
for (int32_t t = 0; t < pInfo->numOfTables; ++t) {
- char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN + 1);
+ char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN);
SChildTableObj *pTable = mnodeGetChildTable(tableId);
if (pTable == NULL) continue;
@@ -2079,8 +2081,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
- pShow->bytes[cols] = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema s = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = s.bytes;
+ pSchema[cols].type = s.type;
strcpy(pSchema[cols].name, "table_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -2097,8 +2100,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
- pShow->bytes[cols] = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema tbCol = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = tbCol.bytes;
+ pSchema[cols].type = tbCol.type;
strcpy(pSchema[cols].name, "stable_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@@ -2268,8 +2272,9 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
- pShow->bytes[cols] = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
- pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ SSchema tbnameColSchema = tGetTableNameColumnSchema();
+ pShow->bytes[cols] = tbnameColSchema.bytes;
+ pSchema[cols].type = tbnameColSchema.type;
strcpy(pSchema[cols].name, "table_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c
index aab0847a6b..95457c83a0 100644
--- a/src/mnode/src/mnodeUser.c
+++ b/src/mnode/src/mnodeUser.c
@@ -315,7 +315,8 @@ static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, voi
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->user, TSDB_USER_LEN);
+ size_t size = sizeof(pUser->user);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->user, size);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -336,7 +337,7 @@ static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, voi
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
- STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->acct, TSDB_USER_LEN);
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pUser->acct, sizeof(pUser->acct));
cols++;
numOfRows++;
diff --git a/src/os/linux/src/linuxSysPara.c b/src/os/linux/src/linuxSysPara.c
index 8ae2630a15..0e76ab0046 100644
--- a/src/os/linux/src/linuxSysPara.c
+++ b/src/os/linux/src/linuxSysPara.c
@@ -332,33 +332,42 @@ bool taosGetDisk() {
}
static bool taosGetCardInfo(int64_t *bytes) {
+ *bytes = 0;
FILE *fp = fopen(tsSysNetFile, "r");
if (fp == NULL) {
uError("open file:%s failed", tsSysNetFile);
return false;
}
- int64_t rbytes, rpackts, tbytes, tpackets;
- int64_t nouse1, nouse2, nouse3, nouse4, nouse5, nouse6;
- char nouse0[200] = {0};
- size_t len;
- char * line = NULL;
- *bytes = 0;
+ size_t len = 2048;
+ char * line = calloc(1, len);
while (!feof(fp)) {
- tfree(line);
- len = 0;
+ memset(line, 0, len);
+
+ int64_t rbytes = 0;
+ int64_t rpackts = 0;
+ int64_t tbytes = 0;
+ int64_t tpackets = 0;
+ int64_t nouse1 = 0;
+ int64_t nouse2 = 0;
+ int64_t nouse3 = 0;
+ int64_t nouse4 = 0;
+ int64_t nouse5 = 0;
+ int64_t nouse6 = 0;
+ char nouse0[200] = {0};
+
getline(&line, &len, fp);
- if (line == NULL) {
- break;
- }
+ line[len - 1] = 0;
+
if (strstr(line, "lo:") != NULL) {
continue;
}
sscanf(line,
- "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64,
+ "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64
+ " %" PRId64,
nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &tbytes, &tpackets);
*bytes += (rbytes + tbytes);
}
diff --git a/src/plugins/http/inc/httpHandle.h b/src/plugins/http/inc/httpHandle.h
index 9be2796a96..b888543137 100644
--- a/src/plugins/http/inc/httpHandle.h
+++ b/src/plugins/http/inc/httpHandle.h
@@ -67,7 +67,7 @@
#define HTTP_COMPRESS_IDENTITY 0
#define HTTP_COMPRESS_GZIP 2
-#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN * 2 + 1)
+#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + TSDB_PASSWORD_LEN)
typedef enum {
HTTP_CONTEXT_STATE_READY,
@@ -84,7 +84,7 @@ typedef struct {
int expire;
int access;
void *taos;
- char id[HTTP_SESSION_ID_LEN + 1];
+ char id[HTTP_SESSION_ID_LEN];
} HttpSession;
typedef enum {
diff --git a/src/plugins/http/src/gcHandle.c b/src/plugins/http/src/gcHandle.c
index 4120980123..176e16301b 100644
--- a/src/plugins/http/src/gcHandle.c
+++ b/src/plugins/http/src/gcHandle.c
@@ -48,7 +48,7 @@ void gcInitHandle(HttpServer* pServer) { httpAddMethod(pServer, &gcDecodeMethod)
bool gcGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[GC_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[GC_USER_URL_POS].len <= 0) {
+ if (pParser->path[GC_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[GC_USER_URL_POS].len <= 0) {
return false;
}
@@ -58,7 +58,7 @@ bool gcGetUserFromUrl(HttpContext* pContext) {
bool gcGetPassFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[GC_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[GC_PASS_URL_POS].len <= 0) {
+ if (pParser->path[GC_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[GC_PASS_URL_POS].len <= 0) {
return false;
}
diff --git a/src/plugins/http/src/httpAuth.c b/src/plugins/http/src/httpAuth.c
index ccf39642c9..cf2ce5ddd9 100644
--- a/src/plugins/http/src/httpAuth.c
+++ b/src/plugins/http/src/httpAuth.c
@@ -89,7 +89,7 @@ bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len) {
return false;
} else {
tstrncpy(pContext->user, descrypt, sizeof(pContext->user));
- tstrncpy(pContext->pass, descrypt + TSDB_USER_LEN, TSDB_PASSWORD_LEN);
+ tstrncpy(pContext->pass, descrypt + TSDB_USER_LEN, sizeof(pContext->pass));
httpTrace("context:%p, fd:%d, ip:%s, taosd token:%s parsed success, user:%s", pContext, pContext->fd,
pContext->ipstr, token, pContext->user);
@@ -100,14 +100,17 @@ bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len) {
}
bool httpGenTaosdAuthToken(HttpContext *pContext, char *token, int maxLen) {
- char buffer[TSDB_USER_LEN + TSDB_PASSWORD_LEN] = {0};
- strncpy(buffer, pContext->user, TSDB_USER_LEN);
- strncpy(buffer + TSDB_USER_LEN, pContext->pass, TSDB_PASSWORD_LEN);
+ char buffer[sizeof(pContext->user) + sizeof(pContext->pass)] = {0};
+ size_t size = sizeof(pContext->user);
+ tstrncpy(buffer, pContext->user, size);
+ size = sizeof(pContext->pass);
+ tstrncpy(buffer + sizeof(pContext->user), pContext->pass, size);
char *encrypt = taosDesEncode(KEY_DES_4, buffer, TSDB_USER_LEN + TSDB_PASSWORD_LEN);
char *base64 = base64_encode((const unsigned char *)encrypt, TSDB_USER_LEN + TSDB_PASSWORD_LEN);
- strncpy(token, base64, (size_t)strlen(base64));
+ size_t len = strlen(base64);
+ tstrncpy(token, base64, len + 1);
free(encrypt);
free(base64);
diff --git a/src/plugins/http/src/restHandle.c b/src/plugins/http/src/restHandle.c
index d481a654d8..93094fa287 100644
--- a/src/plugins/http/src/restHandle.c
+++ b/src/plugins/http/src/restHandle.c
@@ -61,7 +61,7 @@ void restInitHandle(HttpServer* pServer) {
bool restGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[REST_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[REST_USER_URL_POS].len <= 0) {
+ if (pParser->path[REST_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].len <= 0) {
return false;
}
@@ -71,7 +71,7 @@ bool restGetUserFromUrl(HttpContext* pContext) {
bool restGetPassFromUrl(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
- if (pParser->path[REST_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[REST_PASS_URL_POS].len <= 0) {
+ if (pParser->path[REST_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[REST_PASS_URL_POS].len <= 0) {
return false;
}
diff --git a/src/plugins/http/src/tgHandle.c b/src/plugins/http/src/tgHandle.c
index 945eff7611..b85f27d175 100644
--- a/src/plugins/http/src/tgHandle.c
+++ b/src/plugins/http/src/tgHandle.c
@@ -306,21 +306,21 @@ void tgCleanupHandle() {
bool tgGetUserFromUrl(HttpContext *pContext) {
HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[TG_USER_URL_POS].len <= 0) {
+ if (pParser->path[TG_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[TG_USER_URL_POS].len <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[TG_USER_URL_POS].pos, TSDB_USER_LEN);
+ tstrncpy(pContext->user, pParser->path[TG_USER_URL_POS].pos, sizeof(pContext->user));
return true;
}
bool tgGetPassFromUrl(HttpContext *pContext) {
HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[TG_PASS_URL_POS].len <= 0) {
+ if (pParser->path[TG_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[TG_PASS_URL_POS].len <= 0) {
return false;
}
- tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos, TSDB_PASSWORD_LEN);
+ tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos, sizeof(pContext->pass));
return true;
}
diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c
index ff9faa845c..735c77ae21 100644
--- a/src/plugins/monitor/src/monitorMain.c
+++ b/src/plugins/monitor/src/monitorMain.c
@@ -175,7 +175,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
", totalConns smallint, maxConns smallint"
", accessState smallint"
") tags (acctId binary(%d))",
- tsMonitorDbName, TSDB_USER_LEN + 1);
+ tsMonitorDbName, TSDB_USER_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_ACCT_ROOT) {
snprintf(sql, SQL_LENGTH, "create table if not exists %s.acct_%s using %s.acct tags('%s')", tsMonitorDbName, "root",
tsMonitorDbName, "root");
@@ -183,7 +183,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.slowquery(ts timestamp, username "
"binary(%d), created_time timestamp, time bigint, sql binary(%d))",
- tsMonitorDbName, TSDB_TABLE_ID_LEN, TSDB_SLOW_QUERY_SQL_LEN);
+ tsMonitorDbName, TSDB_TABLE_ID_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN);
} else if (cmd == MONITOR_CMD_CREATE_TB_LOG) {
snprintf(sql, SQL_LENGTH,
"create table if not exists %s.log(ts timestamp, level tinyint, "
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index aa8e83da38..340f6bc4f3 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -112,7 +112,7 @@ enum {
#define QUERY_IS_STABLE_QUERY(type) (((type)&TSDB_QUERY_TYPE_STABLE_QUERY) != 0)
#define QUERY_IS_JOIN_QUERY(type) (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_JOIN_QUERY))
-#define QUERY_IS_PROJECTION_QUERY(type) (((type)&TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0)
+#define QUERY_IS_PROJECTION_QUERY(type) (((type)&TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0)
#define QUERY_IS_FREE_RESOURCE(type) (((type)&TSDB_QUERY_TYPE_FREE_RESOURCE) != 0)
typedef struct SArithmeticSupport {
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index cdb05d2288..aa602ed661 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -12,8 +12,8 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "qfill.h"
#include "os.h"
+#include "qfill.h"
#include "hash.h"
#include "hashfunc.h"
@@ -484,7 +484,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
// set time window for current result
pWindowRes->window = *win;
-
+
setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes);
return TSDB_CODE_SUCCESS;
}
@@ -685,14 +685,14 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
SDataBlockInfo *pDataBlockInfo, TSKEY *primaryKeys,
__block_search_fn_t searchFn) {
SQuery *pQuery = pRuntimeEnv->pQuery;
-
+
// tumbling time window query, a special case of sliding time window query
if (pQuery->slidingTime == pQuery->intervalTime) {
// todo opt
}
-
+
getNextTimeWindow(pQuery, pNextWin);
-
+
// next time window is not in current block
if ((pNextWin->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(pNextWin->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) {
@@ -720,7 +720,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNextWin->ekey) {
TSKEY next = primaryKeys[startPos];
-
+
pNextWin->ekey += ((next - pNextWin->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->skey = pNextWin->ekey - pQuery->intervalTime + 1;
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNextWin->skey) {
@@ -729,7 +729,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
pNextWin->skey -= ((pNextWin->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
pNextWin->ekey = pNextWin->skey + pQuery->intervalTime - 1;
}
-
+
return startPos;
}
@@ -1368,8 +1368,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
int32_t index = pSqlFuncMsg->colInfo.colIndex;
if (TSDB_COL_IS_TAG(pIndex->flag)) {
if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor
- pCtx->inputBytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
- pCtx->inputType = TSDB_DATA_TYPE_BINARY;
+ SSchema s = tGetTableNameColumnSchema();
+
+ pCtx->inputBytes = s.bytes;
+ pCtx->inputType = s.type;
} else {
pCtx->inputBytes = pQuery->tagColList[index].bytes;
pCtx->inputType = pQuery->tagColList[index].type;
@@ -2070,7 +2072,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* pTableQueryInfo = pQuery->current;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
-
+
qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d",
GET_QINFO_ADDR(pRuntimeEnv), pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, pTableQueryInfo->lastKey,
pQuery->order.order);
@@ -2111,7 +2113,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SDataStatis *pStatis = NULL;
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
-
+
// query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1;
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
@@ -2500,7 +2502,7 @@ int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
SResultInfo *pResultInfo = &pWindowRes->resultInfo[j];
assert(pResultInfo != NULL);
-
+
if (pResultInfo->numOfRes > 0) {
return pResultInfo->numOfRes;
}
@@ -2549,7 +2551,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
assert(pQInfo->numOfGroupResultPages == 0);
return 0;
} else if (numOfTables == 1) { // no need to merge results since only one table in each group
-
+
}
SCompSupporter cs = {pTableList, posList, pQInfo};
@@ -2638,7 +2640,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
#endif
qTrace("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt);
-
+
tfree(pTableList);
tfree(posList);
tfree(pTree);
@@ -2868,12 +2870,12 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
pRuntimeEnv->pCtx[j].currentStage = 0;
-
+
SResultInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]);
if (pResInfo->initialized) {
continue;
}
-
+
aAggs[functionId].init(&pRuntimeEnv->pCtx[j]);
}
}
@@ -3246,7 +3248,7 @@ void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
-
+
int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
@@ -3266,7 +3268,7 @@ void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult
void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult) {
SQuery *pQuery = pRuntimeEnv->pQuery;
-
+
// Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
@@ -3275,21 +3277,21 @@ void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *
if (pCtx->resultInfo->complete) {
continue;
}
-
+
pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult);
pCtx->currentStage = 0;
-
+
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
-
+
/*
* set the output buffer information and intermediate buffer
* not all queries require the interResultBuf, such as COUNT
*/
pCtx->resultInfo->superTableQ = pRuntimeEnv->stableQuery; // set super table query flag
-
+
if (!pCtx->resultInfo->initialized) {
aAggs[functionId].init(pCtx);
}
@@ -4468,7 +4470,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
// query error occurred or query is killed, abort current execution
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) {
- qTrace("QInfo:%p query killed or error occurred, code:%d, abort", pQInfo, pQInfo->code);
+ qTrace("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code));
return;
}
@@ -4489,7 +4491,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
setQueryStatus(pQuery, QUERY_COMPLETED);
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) {
- qTrace("QInfo:%p query killed or error occurred, code:%d, abort", pQInfo, pQInfo->code);
+ qTrace("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code));
return;
}
@@ -4865,7 +4867,7 @@ static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pEx
(pFuncMsg->functionId == TSDB_FUNC_COUNT && pFuncMsg->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) {
continue;
}
-
+
return false;
}
}
@@ -5143,8 +5145,9 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo *
type = TSDB_DATA_TYPE_DOUBLE;
bytes = tDataTypeDesc[type].nSize;
} else if (pExprs[i].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX && pExprs[i].base.functionId == TSDB_FUNC_TAGPRJ) { // parse the normal column
- type = TSDB_DATA_TYPE_BINARY;
- bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
+ SSchema s = tGetTableNameColumnSchema();
+ type = s.type;
+ bytes = s.bytes;
} else{
int32_t j = getColumnIndexInSource(pQueryMsg, &pExprs[i].base, pTagCols);
assert(j < pQueryMsg->numOfCols || j < pQueryMsg->numOfTags || j == TSDB_TBNAME_COLUMN_INDEX);
@@ -5154,10 +5157,11 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo *
type = pCol->type;
bytes = pCol->bytes;
} else {
- type = TSDB_DATA_TYPE_BINARY;
- bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE;
- }
+ SSchema s = tGetTableNameColumnSchema();
+ type = s.type;
+ bytes = s.bytes;
+ }
}
int32_t param = pExprs[i].base.arg[0].argValue.i64;
@@ -5822,20 +5826,38 @@ _over:
//pQInfo already freed in initQInfo, but *pQInfo may not pointer to null;
if (code != TSDB_CODE_SUCCESS) {
*pQInfo = NULL;
+ } else {
+ SQInfo* pq = (SQInfo*) (*pQInfo);
+
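+ // two references are taken here: one is released when qTableQuery completes,
+ // the other when the handle is freed via qKillQuery or the final qDestroyQueryInfo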
+ T_REF_INC(pq);
+ T_REF_INC(pq);
}
// if failed to add ref for all meters in this query, abort current query
return code;
}
-void qDestroyQueryInfo(qinfo_t pQInfo) {
+static void doDestoryQueryInfo(SQInfo* pQInfo) {
+ assert(pQInfo != NULL);
qTrace("QInfo:%p query completed", pQInfo);
-
- // print the query cost summary
- queryCostStatis(pQInfo);
+ queryCostStatis(pQInfo); // print the query cost summary
freeQInfo(pQInfo);
}
+void qDestroyQueryInfo(qinfo_t qHandle) {
+ SQInfo* pQInfo = (SQInfo*) qHandle;
+ if (!isValidQInfo(pQInfo)) {
+ return;
+ }
+
+ int16_t ref = T_REF_DEC(pQInfo);
+ qTrace("QInfo:%p dec refCount, value:%d", pQInfo, ref);
+
+ if (ref == 0) {
+ doDestoryQueryInfo(pQInfo);
+ }
+}
+
void qTableQuery(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
@@ -5846,6 +5868,7 @@ void qTableQuery(qinfo_t qinfo) {
if (isQueryKilled(pQInfo)) {
qTrace("QInfo:%p it is already killed, abort", pQInfo);
+ qDestroyQueryInfo(pQInfo);
return;
}
@@ -5861,7 +5884,7 @@ void qTableQuery(qinfo_t qinfo) {
}
sem_post(&pQInfo->dataReady);
- // vnodeDecRefCount(pQInfo);
+ qDestroyQueryInfo(pQInfo);
}
int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
@@ -5887,20 +5910,29 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
bool qHasMoreResultsToRetrieve(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
- if (pQInfo == NULL || pQInfo->signature != pQInfo || pQInfo->code != TSDB_CODE_SUCCESS) {
+ if (!isValidQInfo(pQInfo) || pQInfo->code != TSDB_CODE_SUCCESS) {
+ qTrace("QInfo:%p invalid qhandle or error occurs, abort query, code:%x", pQInfo, pQInfo->code);
return false;
}
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
+ bool ret = false;
if (Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) {
- return false;
+ ret = false;
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) {
- return true;
+ ret = true;
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
- return true;
+ ret = true;
} else {
assert(0);
}
+
+ if (ret) {
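+ // take an extra ref so the qhandle stays alive until the client retrieves the pending results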
+ T_REF_INC(pQInfo);
+ qTrace("QInfo:%p has more results waits for client retrieve", pQInfo);
+ }
+
+ return ret;
}
int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen) {
@@ -5945,31 +5977,44 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
return code;
}
+int32_t qKillQuery(qinfo_t qinfo) {
+ SQInfo *pQInfo = (SQInfo *)qinfo;
+
+ if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
+ return TSDB_CODE_QRY_INVALID_QHANDLE;
+ }
+
+ setQueryKilled(pQInfo);
+ qDestroyQueryInfo(pQInfo);
+
+ return TSDB_CODE_SUCCESS;
+}
+
static void buildTagQueryResult(SQInfo* pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
size_t numOfGroup = taosArrayGetSize(pQInfo->groupInfo.pGroupList);
assert(numOfGroup == 0 || numOfGroup == 1);
-
+
if (numOfGroup == 0) {
return;
}
SArray* pa = taosArrayGetP(pQInfo->groupInfo.pGroupList, 0);
-
+
size_t num = taosArrayGetSize(pa);
assert(num == pQInfo->groupInfo.numOfTables);
-
+
int32_t count = 0;
int32_t functionId = pQuery->pSelectExpr[0].base.functionId;
if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id
assert(pQuery->numOfOutput == 1);
-
+
SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
int32_t rsize = pExprInfo->bytes;
count = 0;
-
+
while(pQInfo->tableIndex < num && count < pQuery->rec.capacity) {
int32_t i = pQInfo->tableIndex++;
SGroupItem *item = taosArrayGet(pa, i);
@@ -6011,12 +6056,12 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
}
}
}
-
+
count += 1;
}
-
+
qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, count);
-
+
} else if (functionId == TSDB_FUNC_COUNT) {// handle the "count(tbname)" query
*(int64_t*) pQuery->sdata[0]->data = num;
@@ -6025,16 +6070,17 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
qTrace("QInfo:%p create count(tbname) query, res:%d rows:1", pQInfo, count);
} else { // return only the tags|table name etc.
count = 0;
+ SSchema tbnameSchema = tGetTableNameColumnSchema();
while(pQInfo->tableIndex < num && count < pQuery->rec.capacity) {
int32_t i = pQInfo->tableIndex++;
-
+
SExprInfo* pExprInfo = pQuery->pSelectExpr;
SGroupItem* item = taosArrayGet(pa, i);
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
char* data = tsdbGetTableName(pQInfo->tsdb, &item->id);
- char* dst = pQuery->sdata[j]->data + count * ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
+ char* dst = pQuery->sdata[j]->data + count * tbnameSchema.bytes;
memcpy(dst, data, varDataTLen(data));
} else {// todo refactor
int16_t type = pExprInfo[j].type;
@@ -6042,7 +6088,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
char* data = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, type, bytes);
char* dst = pQuery->sdata[j]->data + count * pExprInfo[j].bytes;
-
+
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (data == NULL) {
setVardataNull(dst, type);
@@ -6060,7 +6106,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
}
count += 1;
}
-
+
qTrace("QInfo:%p create tag values results completed, rows:%d", pQInfo, count);
}
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index efc2a5fa6a..cf15cc690a 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -393,7 +393,6 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
if ( pConn->inType == 0 || pConn->user[0] == 0 ) {
tTrace("%s, connection is already released, rsp wont be sent", pConn->info);
rpcUnlockConn(pConn);
- rpcDecRef(pRpc);
return;
}
@@ -426,6 +425,10 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
rpcSendMsgToPeer(pConn, msg, msgLen);
pConn->secured = 1; // connection shall be secured
+ if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
+ pConn->pReqMsg = NULL;
+ pConn->reqMsgLen = 0;
+
rpcUnlockConn(pConn);
rpcDecRef(pRpc); // decrease the referene count
@@ -482,6 +485,22 @@ void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pMsg, SRpcMsg
return;
}
+// this API is used by server app to keep an APP context in case connection is broken
+int rpcReportProgress(void *handle, char *pCont, int contLen) {
+ SRpcConn *pConn = (SRpcConn *)handle;
+
+ if (pConn->user[0]) {
+ // pReqMsg and reqMsgLen are re-used to store the context from the app server
+ pConn->pReqMsg = pCont;
+ pConn->reqMsgLen = contLen;
+ return 0;
+ }
+
+ tTrace("%s, rpc connection is already released", pConn->info);
+ rpcFreeCont(pCont);
+ return -1;
+}
+
static void rpcFreeMsg(void *msg) {
if ( msg ) {
char *temp = (char *)msg - sizeof(SRpcReqContext);
@@ -542,7 +561,7 @@ static void rpcCloseConn(void *thandle) {
if ( pRpc->connType == TAOS_CONN_SERVER) {
char hashstr[40] = {0};
- size_t size = sprintf(hashstr, "%x:%x:%x:%d", pConn->peerIp, pConn->linkUid, pConn->peerId, pConn->connType);
+ size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pConn->peerIp, pConn->linkUid, pConn->peerId, pConn->connType);
taosHashRemove(pRpc->hash, hashstr, size);
rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg
@@ -592,7 +611,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
char hashstr[40] = {0};
SRpcHead *pHead = (SRpcHead *)pRecv->msg;
- size_t size = sprintf(hashstr, "%x:%x:%x:%d", pRecv->ip, pHead->linkUid, pHead->sourceId, pRecv->connType);
+ size_t size = snprintf(hashstr, sizeof(hashstr), "%x:%x:%x:%d", pRecv->ip, pHead->linkUid, pHead->sourceId, pRecv->connType);
// check if it is already allocated
SRpcConn **ppConn = (SRpcConn **)(taosHashGet(pRpc->hash, hashstr, size));
@@ -682,7 +701,7 @@ static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) {
if (pConn) {
pConn->tretry = 0;
pConn->ahandle = pContext->ahandle;
- sprintf(pConn->info, "%s %p %p", pRpc->label, pConn, pConn->ahandle);
+ snprintf(pConn->info, sizeof(pConn->info), "%s %p %p", pRpc->label, pConn, pConn->ahandle);
pConn->tretry = 0;
} else {
tError("%s %p, failed to set up connection(%s)", pRpc->label, pContext->ahandle, tstrerror(terrno));
@@ -811,7 +830,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
if (rpcIsReq(pHead->msgType)) {
pConn->ahandle = (void *)pHead->ahandle;
- sprintf(pConn->info, "%s %p %p", pRpc->label, pConn, pConn->ahandle);
+ snprintf(pConn->info, sizeof(pConn->info), "%s %p %p", pRpc->label, pConn, pConn->ahandle);
}
sid = pConn->sid;
@@ -846,6 +865,21 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
return pConn;
}
+static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
+ SRpcInfo *pRpc = pConn->pRpc;
+
+ // if there are pending request, notify the app
+ tTrace("%s, notify the server app, connection is gone", pConn->info);
+
+ SRpcMsg rpcMsg;
+ rpcMsg.pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server
+ rpcMsg.contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length
+ rpcMsg.handle = pConn;
+ rpcMsg.msgType = pConn->inType;
+ rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ if (pRpc->cfp) (*(pRpc->cfp))(&rpcMsg, NULL);
+}
+
static void rpcProcessBrokenLink(SRpcConn *pConn) {
if (pConn == NULL) return;
SRpcInfo *pRpc = pConn->pRpc;
@@ -859,19 +893,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) {
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
}
- if (pConn->inType) {
- // if there are pending request, notify the app
- tTrace("%s, connection is gone, notify the app", pConn->info);
-/*
- SRpcMsg rpcMsg;
- rpcMsg.pCont = NULL;
- rpcMsg.contLen = 0;
- rpcMsg.handle = pConn;
- rpcMsg.msgType = pConn->inType;
- rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- (*(pRpc->cfp))(&rpcMsg);
-*/
- }
+ if (pConn->inType) rpcReportBrokenLinkToServer(pConn);
rpcUnlockConn(pConn);
rpcCloseConn(pConn);
@@ -1210,23 +1232,10 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
static void rpcProcessIdleTimer(void *param, void *tmrId) {
SRpcConn *pConn = (SRpcConn *)param;
- SRpcInfo *pRpc = pConn->pRpc;
if (pConn->user[0]) {
tTrace("%s, close the connection since no activity", pConn->info);
- if (pConn->inType && pRpc->cfp) {
- // if there are pending request, notify the app
- tTrace("%s, notify the app, connection is gone", pConn->info);
-/*
- SRpcMsg rpcMsg;
- rpcMsg.pCont = NULL;
- rpcMsg.contLen = 0;
- rpcMsg.handle = pConn;
- rpcMsg.msgType = pConn->inType;
- rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- (*(pRpc->cfp))(&rpcMsg);
-*/
- }
+ if (pConn->inType) rpcReportBrokenLinkToServer(pConn);
rpcCloseConn(pConn);
} else {
tTrace("%s, idle timer:%p not processed", pConn->info, tmrId);
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index d3bc45e2e4..a62ad5bbd3 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -72,7 +72,7 @@ typedef struct STableCheckInfo {
int32_t compSize;
int32_t numOfBlocks; // number of qualified data blocks not the original blocks
SDataCols* pDataCols;
-
+
int32_t chosen; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
SSkipListIterator* iter; // mem buffer skip list iterator
@@ -311,14 +311,14 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
rmem = SL_GET_NODE_DATA(node);
}
}
-
+
if (pCheckInfo->iiter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
if (node != NULL) {
rimem = SL_GET_NODE_DATA(node);
}
}
-
+
if (rmem != NULL && rimem != NULL) {
if (dataRowKey(rmem) < dataRowKey(rimem)) {
pCheckInfo->chosen = 0;
@@ -333,17 +333,17 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
return rimem;
}
}
-
+
if (rmem != NULL) {
pCheckInfo->chosen = 0;
return rmem;
}
-
+
if (rimem != NULL) {
pCheckInfo->chosen = 1;
return rimem;
}
-
+
return NULL;
}
@@ -353,11 +353,11 @@ bool moveToNextRow(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iter);
}
-
+
if (hasNext) {
return hasNext;
}
-
+
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
@@ -366,17 +366,17 @@ bool moveToNextRow(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iiter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iiter);
}
-
+
if (hasNext) {
return hasNext;
}
-
+
if (pCheckInfo->iter != NULL) {
return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
}
}
-
+
return hasNext;
}
@@ -395,7 +395,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
if (row == NULL) {
return false;
}
-
+
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
@@ -581,9 +581,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
-
+
int64_t st = taosGetTimestampUs();
-
+
if (pCheckInfo->pDataCols == NULL) {
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
@@ -603,13 +603,13 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
assert(pCols->numOfRows != 0);
-
+
taosArrayDestroy(sa);
tfree(data);
-
+
int64_t et = taosGetTimestampUs() - st;
tsdbTrace("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, et);
-
+
return blockLoaded;
}
@@ -681,7 +681,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
return false;
}
-
+
SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0];
assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows);
@@ -1212,7 +1212,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
*numOfAllocBlocks = numOfBlocks;
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
-
+
SBlockOrderSupporter sup = {0};
sup.numOfTables = numOfTables;
sup.numOfBlocksPerTable = calloc(1, sizeof(int32_t) * numOfTables);
@@ -1256,17 +1256,17 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
}
assert(numOfBlocks == cnt);
-
+
// since there is only one table qualified, blocks are not sorted
if (numOfQualTables == 1) {
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
cleanBlockOrderSupporter(&sup, numOfQualTables);
-
+
tsdbTrace("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt,
pQueryHandle->qinfo);
return TSDB_CODE_SUCCESS;
}
-
+
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qinfo);
@@ -1683,7 +1683,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbTrace("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d", pQueryHandle,
elapsedTime, numOfRows, numOfCols);
-
+
return numOfRows;
}
@@ -1942,7 +1942,7 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
f1 = (char*) pTable1->name;
f2 = (char*) pTable2->name;
type = TSDB_DATA_TYPE_BINARY;
- bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
+ bytes = tGetTableNameColumnSchema().bytes;
} else {
STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
bytes = pCol->bytes;
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 203a34fd15..543a84dc44 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -242,7 +242,7 @@ void taosReadGlobalLogCfg() {
wordexp_t full_path;
wordexp(configDir, &full_path, 0);
if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL) {
- if (strlen(full_path.we_wordv[0]) > TSDB_FILENAME_LEN - 1) {
+ if (strlen(full_path.we_wordv[0]) >= TSDB_FILENAME_LEN) {
printf("\nconfig file: %s path overflow max len %d, all variables are set to default\n", full_path.we_wordv[0], TSDB_FILENAME_LEN - 1);
wordfree(&full_path);
return;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index b952c07616..67ba891f93 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -553,6 +553,7 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
+ fflush(fp);
fclose(fp);
free(content);
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index f198c2ffe4..d6227f4270 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -27,17 +27,18 @@
#include "vnodeLog.h"
#include "query.h"
-static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, int32_t contLen, SRspRet *pRet);
-static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet);
-static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet);
+static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SReadMsg *pReadMsg);
+static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg);
+static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg);
void vnodeInitReadFp(void) {
vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg;
vnodeProcessReadMsgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessFetchMsg;
}
-int32_t vnodeProcessRead(void *param, int msgType, void *pCont, int32_t contLen, SRspRet *ret) {
+int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
SVnodeObj *pVnode = (SVnodeObj *)param;
+ int msgType = pReadMsg->rpcMsg.msgType;
if (vnodeProcessReadMsgFp[msgType] == NULL) {
vTrace("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[msgType]);
@@ -55,16 +56,46 @@ int32_t vnodeProcessRead(void *param, int msgType, void *pCont, int32_t contLen,
return TSDB_CODE_RPC_NOT_READY;
}
- return (*vnodeProcessReadMsgFp[msgType])(pVnode, pCont, contLen, ret);
+ return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg);
}
-static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet) {
+// notify connection(handle) that current qhandle is created, if current connection from
+// client is broken, the query needs to be killed immediately.
+static void vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
+ SRetrieveTableMsg* killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
+ killQueryMsg->qhandle = htobe64((uint64_t) qhandle);
+ killQueryMsg->free = htons(1);
+ killQueryMsg->header.vgId = htonl(vgId);
+ killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));
+
+ vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle);
+ rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
+}
+
+static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
+ void * pCont = pReadMsg->pCont;
+ int32_t contLen = pReadMsg->contLen;
+ SRspRet *pRet = &pReadMsg->rspRet;
+
SQueryTableMsg* pQueryTableMsg = (SQueryTableMsg*) pCont;
memset(pRet, 0, sizeof(SRspRet));
- int32_t code = TSDB_CODE_SUCCESS;
+ // qHandle needs to be freed correctly
+ if (pReadMsg->rpcMsg.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
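+ // the message content here is the kill-query request registered through
+ // vnodeNotifyCurrentQhandle/rpcReportProgress when the query was created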
+ SRetrieveTableMsg* killQueryMsg = (SRetrieveTableMsg*) pReadMsg->pCont;
+ killQueryMsg->free = htons(killQueryMsg->free);
+ killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
+ vWarn("QInfo:%p connection %p broken, kill query", killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
+ assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
+
+ qKillQuery((qinfo_t) killQueryMsg->qhandle);
+ return TSDB_CODE_TSC_QUERY_CANCELLED; // todo change the error code
+ }
+
+ int32_t code = TSDB_CODE_SUCCESS;
qinfo_t pQInfo = NULL;
+
if (contLen != 0) {
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo);
@@ -74,7 +105,9 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t cont
pRet->len = sizeof(SQueryTableRsp);
pRet->rsp = pRsp;
-
+
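+ // register the new qhandle with the rpc connection so the query can be killed if the link breaks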
+ vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId);
+
vTrace("vgId:%d, QInfo:%p, dnode query msg disposed", pVnode->vgId, pQInfo);
} else {
assert(pCont != NULL);
@@ -91,13 +124,34 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, void *pCont, int32_t cont
return code;
}
-static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, SRspRet *pRet) {
+static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
+ void * pCont = pReadMsg->pCont;
+ SRspRet *pRet = &pReadMsg->rspRet;
+
SRetrieveTableMsg *pRetrieve = pCont;
void *pQInfo = (void*) htobe64(pRetrieve->qhandle);
+ pRetrieve->free = htons(pRetrieve->free);
+
memset(pRet, 0, sizeof(SRspRet));
+ if (pRetrieve->free == 1) {
+ vTrace("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
+ int32_t ret = qKillQuery(pQInfo);
+
+ pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
+ pRet->len = sizeof(SRetrieveTableRsp);
+
+ memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
+ SRetrieveTableRsp* pRsp = pRet->rsp;
+ pRsp->numOfRows = 0;
+ pRsp->completed = true;
+ pRsp->useconds = 0;
+
+ return ret;
+ }
+
vTrace("vgId:%d, QInfo:%p, retrieve msg is received", pVnode->vgId, pQInfo);
-
+
int32_t code = qRetrieveQueryResultInfo(pQInfo);
if (code != TSDB_CODE_SUCCESS) {
//TODO
@@ -110,8 +164,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, void *pCont, int32_t cont
if (qHasMoreResultsToRetrieve(pQInfo)) {
pRet->qhandle = pQInfo;
code = TSDB_CODE_VND_ACTION_NEED_REPROCESSED;
- } else {
- // no further execution invoked, release the ref to vnode
+ } else { // no further execution invoked, release the ref to vnode
qDestroyQueryInfo(pQInfo);
vnodeRelease(pVnode);
}
diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c
index 09225984c4..b05b0db4c9 100644
--- a/src/wal/src/walMain.c
+++ b/src/wal/src/walMain.c
@@ -44,7 +44,7 @@ typedef struct {
uint32_t id; // increase continuously
int num; // number of wal files
char path[TSDB_FILENAME_LEN];
- char name[TSDB_FILENAME_LEN];
+ char name[TSDB_FILENAME_LEN+16];
pthread_mutex_t mutex;
} SWal;
@@ -108,7 +108,7 @@ void walClose(void *handle) {
if (pWal->keep == 0) {
// remove all files in the directory
for (int i=0; i<pWal->num; ++i) {
- sprintf(pWal->name, "%s/%s%d", pWal->path, walPrefix, pWal->id-i);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", pWal->path, walPrefix, pWal->id-i);
if (remove(pWal->name) <0) {
wError("wal:%s, failed to remove", pWal->name);
} else {
@@ -140,7 +140,7 @@ int walRenew(void *handle) {
pWal->num++;
- sprintf(pWal->name, "%s/%s%d", pWal->path, walPrefix, pWal->id);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", pWal->path, walPrefix, pWal->id);
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
@@ -152,7 +152,7 @@ int walRenew(void *handle) {
if (pWal->num > pWal->max) {
// remove the oldest wal file
char name[TSDB_FILENAME_LEN * 3];
- sprintf(name, "%s/%s%d", pWal->path, walPrefix, pWal->id - pWal->max);
+ snprintf(name, sizeof(name), "%s/%s%d", pWal->path, walPrefix, pWal->id - pWal->max);
if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno));
} else {
@@ -214,7 +214,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
int plen = strlen(walPrefix);
char opath[TSDB_FILENAME_LEN+5];
- int slen = sprintf(opath, "%s", pWal->path);
+ int slen = snprintf(opath, sizeof(opath), "%s", pWal->path);
if ( pWal->keep == 0)
strcpy(opath+slen, "/old");
@@ -245,7 +245,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
wTrace("wal:%s, %d files will be restored", opath, count);
for (index = minId; index<=maxId; ++index) {
- sprintf(pWal->name, "%s/%s%d", opath, walPrefix, index);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, index);
terrno = walRestoreWalFile(pWal, pVnode, writeFp);
if (terrno < 0) break;
}
@@ -264,7 +264,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
// open the existing WAL file in append mode
pWal->num = count;
pWal->id = maxId;
- sprintf(pWal->name, "%s/%s%d", opath, walPrefix, maxId);
+ snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, maxId);
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno));
@@ -361,7 +361,7 @@ int walHandleExistingFiles(const char *path) {
char nname[TSDB_FILENAME_LEN * 3];
char opath[TSDB_FILENAME_LEN];
- sprintf(opath, "%s/old", path);
+ snprintf(opath, sizeof(opath), "%s/old", path);
struct dirent *ent;
DIR *dir = opendir(path);
@@ -377,8 +377,8 @@ int walHandleExistingFiles(const char *path) {
int count = 0;
while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
- sprintf(oname, "%s/%s", path, ent->d_name);
- sprintf(nname, "%s/old/%s", path, ent->d_name);
+ snprintf(oname, sizeof(oname), "%s/%s", path, ent->d_name);
+ snprintf(nname, sizeof(nname), "%s/old/%s", path, ent->d_name);
if (access(opath, F_OK) != 0) {
if (mkdir(opath, 0755) != 0) {
wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno));
@@ -416,7 +416,7 @@ static int walRemoveWalFiles(const char *path) {
while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
- sprintf(name, "%s/%s", path, ent->d_name);
+ snprintf(name, sizeof(name), "%s/%s", path, ent->d_name);
if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c
index 55a19eb5f9..34c785a271 100644
--- a/tests/examples/c/demo.c
+++ b/tests/examples/c/demo.c
@@ -30,7 +30,7 @@ static int32_t doQuery(TAOS* taos, const char* sql) {
TAOS_RES* res = taos_query(taos, sql);
if (taos_errno(res) != 0) {
- printf("failed to execute query, reason:%s\n", taos_errstr(res));
+ printf("failed to execute query, reason:%s\n", taos_errstr(taos));
return -1;
}
@@ -77,7 +77,7 @@ static __attribute__((unused)) void multiThreadTest(int32_t numOfThreads, void*
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
- pthread_t* threadId = malloc(sizeof(pthread_t)*numOfThreads);
+ pthread_t* threadId = (pthread_t*)malloc(sizeof(pthread_t)*(uint32_t)numOfThreads);
for (int i = 0; i < numOfThreads; ++i) {
pthread_create(&threadId[i], NULL, oneLoader, conn);
@@ -115,15 +115,15 @@ int main(int argc, char *argv[]) {
printf("success to connect to server\n");
// doQuery(taos, "select c1,count(*) from group_db0.group_mt0 where c1<8 group by c1");
- doQuery(taos, "select * from test.m1");
+// doQuery(taos, "select * from test.m1");
// multiThreadTest(1, taos);
// doQuery(taos, "select tbname from test.m1");
// doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 and tbname in ('lm2_tb0') interval(1s) group by t1");
// doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 and tbname in ('lm2_tb0', 'lm2_tb1', 'lm2_tb2') interval(1s)");
-// for(int32_t i = 0; i < 100000; ++i) {
-// doQuery(taos, "insert into t1 values(now, 2)");
-// }
+ for(int32_t i = 0; i < 200; ++i) {
+ doQuery(taos, "select * from lm2_db0.lm2_stb0");
+ }
// doQuery(taos, "create table t1(ts timestamp, k binary(12), f nchar(2))");
taos_close(taos);
diff --git a/tests/pytest/random-test/random-test-multi-threading-3.py b/tests/pytest/random-test/random-test-multi-threading-3.py
index 91f35ea7a5..47c4228a8f 100644
--- a/tests/pytest/random-test/random-test-multi-threading-3.py
+++ b/tests/pytest/random-test/random-test-multi-threading-3.py
@@ -127,6 +127,8 @@ class Test (Thread):
def drop_stable(self):
tdLog.info("drop_stable")
global last_stb
+ global last_tb
+ global written
if (last_stb == ""):
tdLog.info("no super table")
@@ -135,6 +137,8 @@ class Test (Thread):
tdLog.info("will drop last super table")
tdSql.execute('drop table %s' % last_stb)
last_stb = ""
+ last_tb = ""
+ written = 0
def restart_database(self):
tdLog.info("restart_database")
diff --git a/tests/pytest/random-test/random-test-multi-threading.py b/tests/pytest/random-test/random-test-multi-threading.py
index 997001157e..65b6dcd948 100644
--- a/tests/pytest/random-test/random-test-multi-threading.py
+++ b/tests/pytest/random-test/random-test-multi-threading.py
@@ -105,12 +105,18 @@ class Test (threading.Thread):
return
else:
tdLog.info("will create stable %s" % current_stb)
+ tdLog.info(
+ 'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
+ current_stb)
tdSql.execute(
'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
current_stb)
last_stb = current_stb
current_tb = "tb%d" % int(round(time.time() * 1000))
+ tdLog.info(
+ "create table %s using %s tags (1, '表1')" %
+ (current_tb, last_stb))
tdSql.execute(
"create table %s using %s tags (1, '表1')" %
(current_tb, last_stb))
@@ -128,6 +134,8 @@ class Test (threading.Thread):
def drop_stable(self):
tdLog.info("drop_stable")
global last_stb
+ global last_tb
+ global written
if (last_stb == ""):
tdLog.info("no super table")
@@ -136,6 +144,8 @@ class Test (threading.Thread):
tdLog.info("will drop last super table")
tdSql.execute('drop table %s' % last_stb)
last_stb = ""
+ last_tb = ""
+ written = 0
def restart_database(self):
tdLog.info("restart_database")
diff --git a/tests/pytest/random-test/random-test.py b/tests/pytest/random-test/random-test.py
index 9cfc24f404..5eb356960a 100644
--- a/tests/pytest/random-test/random-test.py
+++ b/tests/pytest/random-test/random-test.py
@@ -111,6 +111,8 @@ class Test:
tdLog.info("will drop last super table")
tdSql.execute('drop table %s' % self.last_stb)
self.last_stb = ""
+ self.last_tb = ""
+ self.written = 0
def query_data_from_stable(self):
tdLog.info("query_data_from_stable")
diff --git a/tests/pytest/table/boundary.py b/tests/pytest/table/boundary.py
index d9f0490ed4..50586b72ff 100644
--- a/tests/pytest/table/boundary.py
+++ b/tests/pytest/table/boundary.py
@@ -141,7 +141,7 @@ class TDTestCase:
tdSql.prepare()
# 8 bytes for timestamp
- maxRowSize = 65536 - 8
+ maxRowSize = 65535 - 8
maxCols = self.getLimitFromSourceCode('TSDB_MAX_COLUMNS') - 1
# for binary cols, 2 bytes are used for length
diff --git a/tests/script/general/cache/restart_metrics.sim b/tests/script/general/cache/restart_metrics.sim
index 376a491f26..18c514acbf 100644
--- a/tests/script/general/cache/restart_metrics.sim
+++ b/tests/script/general/cache/restart_metrics.sim
@@ -50,6 +50,7 @@ endi
print =============== step2
system sh/exec.sh -n dnode1 -s stop
+sleep 5000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
diff --git a/tests/script/general/cache/restart_table.sim b/tests/script/general/cache/restart_table.sim
index 4e8bc92c10..c4e6c6f2ac 100644
--- a/tests/script/general/cache/restart_table.sim
+++ b/tests/script/general/cache/restart_table.sim
@@ -34,6 +34,7 @@ endi
print =============== step2
system sh/exec.sh -n dnode1 -s stop
+sleep 5000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10
diff --git a/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim b/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
index 09654ca8a9..1652470346 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
@@ -51,7 +51,7 @@ system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 3000
-$totalTableNum = 10000
+$totalTableNum = 10
$sleepTimer = 10000
$db = db
@@ -192,7 +192,7 @@ if $data00 != $totalRows then
endi
-print ============== step4: stop dnode2 for checking if sync success
+print ============== step4: stop dnode2 for checking if sync ok
system sh/exec.sh -n dnode2 -s stop
sleep $sleepTimer
diff --git a/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim b/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
index a7aaf018f1..b870d07c1d 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
@@ -55,7 +55,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 10000
+$totalTableNum = 10
$sleepTimer = 10000
$db = db
@@ -133,5 +133,3 @@ if $data00 != $totalRows then
return -1
endi
-print drop dnode $hostname3, return error: not drop dnode for repica is 2, need 2 dnodes.
-sql_error drop dnode $hostname3
diff --git a/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim b/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
index 04933ea6e9..1b0a184690 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
@@ -173,7 +173,7 @@ if $rows != 3 then
return -1
endi
-print ============== step5: remove dnode4 director, then recreate dnode4 into cluster, result should success
+print ============== step5: remove dnode4 director, then recreate dnode4 into cluster, result should ok
system sh/exec.sh -n dnode4 -s stop
system rm -rf ../../../sim/dnode4
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim
index c156d7d55c..fda850d2c9 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim
@@ -55,7 +55,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
index 9f6486cfcb..d556976a43 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
@@ -55,11 +55,11 @@ sql create dnode $hostname3
#sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
-sql create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
sql use $db
# create table , insert data
@@ -168,7 +168,7 @@ $dnode4Status = $data4_4
if $dnode3Status != ready then
sleep 2000
- goto wait_dnode4_reready
+ goto wait_dnode3_reready
endi
sql select count(*) from $stb
@@ -177,7 +177,7 @@ if $data00 != $totalRows then
return -1
endi
-print ============== step5: stop dnode2, and check if dnode3 sync success
+print ============== step5: stop dnode2, and check if dnode3 sync ok
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline_0:
@@ -213,8 +213,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode2Vtatus = $data7_2
+$dnode3Vtatus = $data4_2
if $dnode2Vtatus != offline then
sleep 2000
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
index b3b71ccf51..968a93156e 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
@@ -55,11 +55,11 @@ sql create dnode $hostname3
#sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
-sql create database $db replica 3 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
sql use $db
# create table , insert data
@@ -105,7 +105,7 @@ if $data00 != $totalRows then
return -1
endi
-print ============== step5: stop dnode2, and check if dnode3 sync success
+print ============== step5: stop dnode2, and check if dnode3 sync ok
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline_0:
@@ -141,8 +141,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
-$dnode2Vtatus = $data4_2
-$dnode3Vtatus = $data7_2
+$dnode2Vtatus = $data7_2
+$dnode3Vtatus = $data4_2
if $dnode2Vtatus != offline then
sleep 2000
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim
index 46b4023aa2..82f2aad07b 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim
@@ -55,7 +55,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
index b6a2b7e1d5..4e9afbf31a 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
@@ -55,7 +55,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -152,7 +152,7 @@ print ============== step4: restart dnode2, then create database with replica 2,
system sh/exec.sh -n dnode2 -s start
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db1
diff --git a/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim b/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim
new file mode 100644
index 0000000000..57a833b8de
--- /dev/null
+++ b/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim
@@ -0,0 +1,126 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+#system sh/cfg.sh -n dnode1 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 3, and create table to max tables
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+#system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sleep 3000
+sql create dnode $hostname3
+#sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 4
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 3
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+$rowNum = 10
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x )
+ $x = $x + 10
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
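+# sanity check: every row inserted above must be visible before any dnode is stopped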
+sql select count(*) from $stb
+print data00:$data00 totalRows:$totalRows
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: stop dnode2
+system sh/exec.sh -n dnode2 -s stop
+sleep 3000
+
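+# with dnode2 killed, show mnodes should report dnode1 as master and dnode2 as offline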
+sql show mnodes
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+$mnode1Status = $data2_1
+$mnode2Status = $data2_2
+$mnode3Status = $data2_3
+#$mnode4Status = $data2_4
+
+if $mnode1Status != master then
+ return -1
+endi
+
+if $mnode2Status != offline then
+ return -1
+endi
+
+sql reset query cache
+sql select count(*) from $stb
+print data00:$data00 totalRows:$totalRows
+if $data00 != $totalRows then
+ return -1
+endi
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim
index 990c77b050..26a6359a5c 100644
--- a/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim
@@ -47,7 +47,7 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
-print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
@@ -56,17 +56,17 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
-print create database $db replica 3 maxTables $totalTableNum
-sql create database $db replica 3 maxTables $totalTableNum
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
sql use $db
# create table , insert data
$stb = stb
-sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int)
$rowNum = 500
$tblNum = $totalTableNum
$totalRows = 0
@@ -81,8 +81,8 @@ while $i < $tblNum
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
- sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
- $x = $x + 60
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
@@ -113,8 +113,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
@@ -151,7 +151,7 @@ sql alter table $stb add column f1 double
$i = 0
while $i < $tblNum
$tb = tb . $i
- sql inset into $tb values (now, 10001) (now + 1s, 10002) (now + 2s, 10003) (now + 3s, 10004)
+ sql insert into $tb values (now, 10001, 1.0001) (now + 1s, 10002, 1.0002) (now + 2s, 10003, 1.0003) (now + 3s, 10004, 1.0004)
$i = $i + 1
endw
$addRows = 4 * $tblNum
@@ -174,8 +174,8 @@ print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != ready then
diff --git a/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim b/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim
new file mode 100644
index 0000000000..b883f077c2
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim
@@ -0,0 +1,235 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+#system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+#system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 2, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+#system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+#sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
+
+$db = db
+print create database $db replica 2 maxTables $maxTables
+sql create database $db replica 2 maxTables $maxTables
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t0 int, t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
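+# each pass of the inner loop inserts 20 rows in one statement, so every table ends up with $rowNum rows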
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
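+# proceed only once the vgroup reports dnode4 offline and dnode3 as master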
+
+print ============== step4: alter table and tag, then drop all sub tables, recreate some subtables and insert more data rows
+sql alter table $stb drop column c1
+sql alter table $stb add column f1 double
+
+sql alter table $stb add tag t2 int
+sql alter table $stb add tag t3 int
+sql alter table $stb drop tag t1
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql drop table $tb
+ $i = $i + 1
+endw
+
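+# recreate the subtables against the altered schema (tags t0/t2/t3) and reload data while dnode4 stays offline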
+$totalRows = 0
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, waiting dnode4 synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim
index eb76908ac9..e7b2c70c78 100644
--- a/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim
@@ -47,7 +47,7 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
-print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
@@ -56,12 +56,14 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
+
$db = db
-print create database $db replica 3 maxTables $totalTableNum
-sql create database $db replica 3 maxTables $totalTableNum
+print create database $db replica 2 maxTables $maxTables
+sql create database $db replica 2 maxTables $maxTables
sql use $db
# create table , insert data
@@ -113,8 +115,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
@@ -186,8 +188,8 @@ print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != ready then
diff --git a/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim b/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim
index f4af4f4c9d..20c575d382 100644
--- a/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -113,8 +113,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
diff --git a/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim
index 56e468bf1a..9f72cde440 100644
--- a/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim
@@ -47,7 +47,7 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
-print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
@@ -56,12 +56,12 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
-print create database $db replica 3 maxTables $totalTableNum
-sql create database $db replica 3 maxTables $totalTableNum
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
sql use $db
# create table , insert data
@@ -113,8 +113,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
@@ -146,14 +146,14 @@ endi
print ============== step4: drop some tables
$i = 1
-$dropTblNum = 21
+$dropTblNum = 6
while $i < $dropTblNum
$tb = tb . $i
sql drop table if exists $tb
$i = $i + 1
endw
-$tblNum = $tblNum - 20
+$tblNum = $tblNum - 5
print ============== step5: restart dnode4, waiting dnode4 synced
system sh/exec.sh -n dnode4 -s start
@@ -172,8 +172,8 @@ print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != ready then
diff --git a/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim
index 24deed212d..43f9dfff14 100644
--- a/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -66,7 +66,7 @@ sql use $db
# create table , insert data
$stb = stb
-sql create table $stb (ts timestamp, c1 int) tags(t1 int)
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int)
$rowNum = 500
$tblNum = $totalTableNum
$totalRows = 0
@@ -81,8 +81,8 @@ while $i < $tblNum
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
- sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
- $x = $x + 60
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
@@ -151,7 +151,7 @@ sql alter table $stb add column f1 double
$i = 0
while $i < $tblNum
$tb = tb . $i
- sql inset into $tb values (now, 10001) (now + 1s, 10002) (now + 2s, 10003) (now + 3s, 10004)
+ sql insert into $tb values (now, 10001, 1.0001) (now + 1s, 10002, 1.0002) (now + 2s, 10003, 1.0003) (now + 3s, 10004, 1.0004)
$i = $i + 1
endw
$addRows = 4 * $tblNum
diff --git a/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim b/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim
new file mode 100644
index 0000000000..e27bbafba7
--- /dev/null
+++ b/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim
@@ -0,0 +1,234 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+#system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
+#system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1, only deploy mnode
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname2
+sql create dnode $hostname3
+sql create dnode $hostname4
+sleep 3000
+
+$totalTableNum = 10
+$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
+
+$db = db
+print create database $db replica 3 maxTables $maxTables
+sql create database $db replica 3 maxTables $maxTables
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 int, c2 int) tags(t0 int, t1 int)
+$rowNum = 500
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1420041600000
+$tsEnd = 0
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step3: stop dnode4
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+sleep $sleepTimer
+wait_dnode4_offline_0:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != offline then
+ sleep 2000
+ goto wait_dnode4_offline_0
+endi
+
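+# dnode4 is offline in the dnode list; now wait for its vgroup replica to go offline as well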
+wait_dnode4_vgroup_offline:
+sql show vgroups
+if $rows != 1 then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+$dnode4Vtatus = $data4_2
+$dnode3Vtatus = $data7_2
+
+if $dnode4Vtatus != offline then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+if $dnode3Vtatus != master then
+ sleep 2000
+ goto wait_dnode4_vgroup_offline
+endi
+
+print ============== step4: alter table and tag, then drop all sub tables, recreate some subtables and insert more data rows
+sql alter table $stb drop column c1
+sql alter table $stb add column f1 double
+
+sql alter table $stb add tag t2 int
+sql alter table $stb add tag t3 int
+sql alter table $stb drop tag t1
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql drop table $tb
+ $i = $i + 1
+endw
+
+$totalRows = 0
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i , $i , $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x , $x ) ( $ts + 1a , $x , $x ) ( $ts + 2a , $x , $x ) ( $ts + 3a , $x , $x ) ( $ts + 4a , $x , $x ) ( $ts + 5a , $x , $x ) ( $ts + 6a , $x , $x ) ( $ts + 7a , $x , $x ) ( $ts + 8a , $x , $x ) ( $ts + 9a , $x , $x ) ( $ts + 10a , $x , $x ) ( $ts + 11a , $x , $x ) ( $ts + 12a , $x , $x ) ( $ts + 13a , $x , $x ) ( $ts + 14a , $x , $x ) ( $ts + 15a , $x , $x ) ( $ts + 16a , $x , $x ) ( $ts + 17a , $x , $x ) ( $ts + 18a , $x , $x ) ( $ts + 19a , $x , $x )
+ $x = $x + 20
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+$tsEnd = $tsStart + $totalRows / $tblNum
+
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+print ============== step5: restart dnode4, waiting dnode4 synced
+system sh/exec.sh -n dnode4 -s start
+
+wait_dnode4_ready:
+sql show dnodes
+if $rows != 4 then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
+#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
+#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+#$dnode1Status = $data4_1
+#$dnode2Status = $data4_2
+$dnode3Status = $data4_3
+$dnode4Status = $data4_4
+#$dnode5Status = $data4_5
+
+if $dnode4Status != ready then
+ sleep 2000
+ goto wait_dnode4_ready
+endi
+
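+# dnode4 may still be replaying the dropped and recreated tables after restart, so step6 retries the count check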
+print ============== step6: check result
+
+sql reset query cache
+
+$cnt = 0
+wait_table_dropped:
+$cnt = $cnt + 1
+if $cnt == 20 then
+ return -1
+endi
+sql select count(*) from $stb
+if $data00 != $totalRows then
+ print data00: $data00 totalRows: $totalRows
+ sleep 2000
+ goto wait_table_dropped
+endi
+
+
+
+
diff --git a/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim
index f395d57211..1e47157975 100644
--- a/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim
@@ -56,12 +56,13 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
+$maxTables = $totalTableNum * 2
$db = db
-print create database $db replica 3 maxTables $totalTableNum
-sql create database $db replica 3 maxTables $totalTableNum
+print create database $db replica 3 maxTables $maxTables
+sql create database $db replica 3 maxTables $maxTables
sql use $db
# create table , insert data
diff --git a/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim b/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim
index 0c7211d9e5..c9fc91527d 100644
--- a/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
diff --git a/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim
index 8e59acc234..aecf41b892 100644
--- a/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim
+++ b/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -146,14 +146,14 @@ endi
print ============== step4: drop some tables
$i = 1
-$dropTblNum = 21
+$dropTblNum = 6
while $i < $dropTblNum
$tb = tb . $i
sql drop table if exists $tb
$i = $i + 1
endw
-$tblNum = $tblNum - 20
+$tblNum = $tblNum - 5
print ============== step5: restart dnode4, waiting dnode4 synced
system sh/exec.sh -n dnode4 -s start
diff --git a/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim b/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim
index 06747ed589..9730842938 100644
--- a/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim
+++ b/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim
@@ -47,7 +47,7 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
-print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
@@ -56,12 +56,12 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
-print create database $db replica 3 maxTables $totalTableNum
-sql create database $db replica 3 maxTables $totalTableNum
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
sql use $db
# create table , insert data
@@ -113,8 +113,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
@@ -162,7 +162,7 @@ while $i < $tblNum
endw
sql select count(*) from $stb
-print data00 $data00
+print data00:$data00 totalRows:$totalRows
if $data00 != $totalRows then
return -1
endi
@@ -172,8 +172,8 @@ system sh/exec.sh -n dnode4 -s start
run_back unique/arbitrator/sync_replica_alterTable_background_add.sim
print ============== step6: check result
-#in background.sim, add one column and insert 200 rows
-$totalRows = $totalRows + 200
+#in background.sim, add one column and insert 36 rows
+$totalRows = $totalRows + 36
$cnt = 0
wait_table_altered:
@@ -183,7 +183,7 @@ if $cnt == 20 then
endi
sql select count(*) from $stb
if $data00 != $totalRows then
- print data00: $data00
+ print data00:$data00 totalRows:$totalRows
sleep 2000
goto wait_table_altered
endi
diff --git a/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim b/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim
index ecd8208281..21957871a5 100644
--- a/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim
+++ b/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim
@@ -47,7 +47,7 @@ system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
-print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
+print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
@@ -56,12 +56,12 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
-print create database $db replica 3 maxTables $totalTableNum
-sql create database $db replica 3 maxTables $totalTableNum
+print create database $db replica 2 maxTables $totalTableNum
+sql create database $db replica 2 maxTables $totalTableNum
sql use $db
# create table , insert data
@@ -113,8 +113,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
@@ -173,7 +173,7 @@ run_back unique/arbitrator/sync_replica_alterTable_background_drop.sim
print ============== step6: check result
-#in background.sim, drop one column and add one new column, then insert 200 rows
-$totalRows = $totalRows + 200
+#in background.sim, drop one column and add one new column, then insert 36 rows
+$totalRows = $totalRows + 36
$cnt = 0
wait_table_altered:
diff --git a/tests/script/unique/arbitrator/sync_replica2_dropDb.sim b/tests/script/unique/arbitrator/sync_replica2_dropDb.sim
index fa19917e9f..9836284af9 100644
--- a/tests/script/unique/arbitrator/sync_replica2_dropDb.sim
+++ b/tests/script/unique/arbitrator/sync_replica2_dropDb.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -113,8 +113,8 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
-$dnode3Status = $data4_3
-$dnode4Status = $data4_4
+#$dnode3Status = $data4_3
+$dnode4Status = $data4_3
#$dnode5Status = $data4_5
if $dnode4Status != offline then
diff --git a/tests/script/unique/arbitrator/sync_replica2_dropTable.sim b/tests/script/unique/arbitrator/sync_replica2_dropTable.sim
index cca4586b35..4793e8e535 100644
--- a/tests/script/unique/arbitrator/sync_replica2_dropTable.sim
+++ b/tests/script/unique/arbitrator/sync_replica2_dropTable.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname2
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -162,18 +162,18 @@ while $i < $tblNum
endw
sql select count(*) from $stb
-print data00 $data00
+print data00:$data00 totalRows:$totalRows
if $data00 != $totalRows then
return -1
endi
-print ============== step5: restart dnode4, while drop database in other thead when dnode4 is syncing
+print ============== step5: restart dnode4, while drop some tables in other thread when dnode4 is syncing
system sh/exec.sh -n dnode4 -s start
run_back unique/arbitrator/sync_replica_dropTable_background.sim
print ============== step6: check result
-#in background.sim, drop 10 tables
-$totalRows = $totalRows - 10800
+#in background.sim, drop 5 tables
+$totalRows = $totalRows - 5400
$cnt = 0
wait_table_dropped:
@@ -183,15 +183,15 @@ if $cnt == 20 then
endi
sql select count(*) from $stb
if $data00 != $totalRows then
- print data00: $data00
+ print data00:$data00 totalRows:$totalRows
sleep 2000
goto wait_table_dropped
endi
-$tblNum = $tblNum - 10
+$tblNum = $tblNum - 5
sql select count(tbname) from $stb
if $data00 != $tblNum then
- print data00: $data00
+ print data00: $data00 tblNum: $tblNum
sleep 2000
goto wait_table_dropped
endi
diff --git a/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim b/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim
index d9dcea2b22..9277ad2c85 100644
--- a/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim
+++ b/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -173,7 +173,7 @@ run_back unique/arbitrator/sync_replica_alterTable_background_add.sim
print ============== step6: check result
#in background.sim, add one column and insert 200 rows
-$totalRows = $totalRows + 200
+$totalRows = $totalRows + 36
$cnt = 0
wait_table_altered:
diff --git a/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim b/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim
index 2c0a9f8747..6593d6933b 100644
--- a/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim
+++ b/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -172,8 +172,8 @@ system sh/exec.sh -n dnode4 -s start
run_back unique/arbitrator/sync_replica_alterTable_background_drop.sim
print ============== step6: check result
-#in background.sim, drop one column and add one new column, then insert 200 rows
-$totalRows = $totalRows + 200
+#in background.sim, drop one column and add one new column, then insert 36 rows
+$totalRows = $totalRows + 36
$cnt = 0
wait_table_altered:
diff --git a/tests/script/unique/arbitrator/sync_replica3_dropDb.sim b/tests/script/unique/arbitrator/sync_replica3_dropDb.sim
index e73291d6f3..7099b1dc8e 100644
--- a/tests/script/unique/arbitrator/sync_replica3_dropDb.sim
+++ b/tests/script/unique/arbitrator/sync_replica3_dropDb.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
diff --git a/tests/script/unique/arbitrator/sync_replica3_dropTable.sim b/tests/script/unique/arbitrator/sync_replica3_dropTable.sim
index 20d157eeb5..f902b41de5 100644
--- a/tests/script/unique/arbitrator/sync_replica3_dropTable.sim
+++ b/tests/script/unique/arbitrator/sync_replica3_dropTable.sim
@@ -56,7 +56,7 @@ sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
-$totalTableNum = 100
+$totalTableNum = 10
$sleepTimer = 3000
$db = db
@@ -143,7 +143,7 @@ if $dnode3Vtatus != master then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
-
+sleep 2000
print ============== step4: insert more data rows
$tsStart = $tsEnd + 1000
$i = 0
@@ -173,7 +173,7 @@ run_back unique/arbitrator/sync_replica_dropTable_background.sim
print ============== step6: check result
-#in background.sim, drop 10 tables
-$totalRows = $totalRows - 10800
+#in background.sim, drop 5 tables
+$totalRows = $totalRows - 5400
$cnt = 0
wait_table_dropped:
@@ -188,7 +188,7 @@ if $data00 != $totalRows then
goto wait_table_dropped
endi
-$tblNum = $tblNum - 10
+$tblNum = $tblNum - 5
sql select count(tbname) from $stb
if $data00 != $tblNum then
print data00: $data00
diff --git a/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim b/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim
index c5f9157e26..3867aa3699 100644
--- a/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim
+++ b/tests/script/unique/arbitrator/sync_replica_alterTable_background_add.sim
@@ -3,7 +3,7 @@ sql connect
$db = db
$stb = stb
print =============== sync_replica_alterTable_background_add.sim step0: alter table and insert data
-$totalTableNum = 100
+$totalTableNum = 10
sql use $db
@@ -12,7 +12,7 @@ print alter table $stb add column f1 float
sql alter table $stb add column f1 float
$tblNum = $totalTableNum
-$alterTblNum = 51
+$alterTblNum = 10
$i = 1
while $i < $alterTblNum
diff --git a/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim b/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim
index 5540eb96df..fb8bc60972 100644
--- a/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim
+++ b/tests/script/unique/arbitrator/sync_replica_alterTable_background_drop.sim
@@ -8,11 +8,11 @@ $totalTableNum = 100
sql use $db
#sql create table $stb (ts timestamp, c1 int) tags(t1 int)
-sql alter table $stb drop column c1
sql alter table $stb add column f1 double
+sql alter table $stb drop column c1
$tblNum = $totalTableNum
-$alterTblNum = 51
+$alterTblNum = 10
$i = 1
while $i < $alterTblNum
diff --git a/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim b/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim
index a52cecaa8e..485253027a 100644
--- a/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim
+++ b/tests/script/unique/arbitrator/sync_replica_dropTable_background.sim
@@ -3,12 +3,12 @@ sql connect
$db = db
$stb = stb
print =============== sync_replica_dropTable_background.sim step0: drop table
-$totalTableNum = 100
+$totalTableNum = 6
sql use $db
$tblNum = $totalTableNum
-$dropTblNum = 11
+$dropTblNum = 6
$i = 1
while $i < $dropTblNum
diff --git a/tests/script/unique/arbitrator/testSuite.sim b/tests/script/unique/arbitrator/testSuite.sim
new file mode 100644
index 0000000000..d286ecfaf2
--- /dev/null
+++ b/tests/script/unique/arbitrator/testSuite.sim
@@ -0,0 +1,36 @@
+run unique/arbitrator/dn2_mn1_cache_file_sync.sim
+run unique/arbitrator/dn2_mn1_cache_file_sync_second.sim
+run unique/arbitrator/dn3_mn1_full_createTableFail.sim
+run unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
+run unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
+run unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
+run unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
+run unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
+run unique/arbitrator/dn3_mn1_replica_change.sim
+run unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
+run unique/arbitrator/dn3_mn1_vnode_change.sim
+run unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
+run unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
+####run unique/arbitrator/dn3_mn1_vnode_delDir.sim
+run unique/arbitrator/dn3_mn1_vnode_nomaster.sim
+run unique/arbitrator/dn3_mn2_killDnode.sim
+run unique/arbitrator/insert_duplicationTs.sim
+run unique/arbitrator/offline_replica2_alterTable_online.sim
+run unique/arbitrator/offline_replica2_alterTag_online.sim
+run unique/arbitrator/offline_replica2_createTable_online.sim
+run unique/arbitrator/offline_replica2_dropDb_online.sim
+run unique/arbitrator/offline_replica2_dropTable_online.sim
+run unique/arbitrator/offline_replica3_alterTable_online.sim
+run unique/arbitrator/offline_replica3_alterTag_online.sim
+run unique/arbitrator/offline_replica3_createTable_online.sim
+run unique/arbitrator/offline_replica3_dropDb_online.sim
+run unique/arbitrator/offline_replica3_dropTable_online.sim
+run unique/arbitrator/replica_changeWithArbitrator.sim
+run unique/arbitrator/sync_replica2_alterTable_add.sim
+run unique/arbitrator/sync_replica2_alterTable_drop.sim
+run unique/arbitrator/sync_replica2_dropDb.sim
+run unique/arbitrator/sync_replica2_dropTable.sim
+run unique/arbitrator/sync_replica3_alterTable_add.sim
+run unique/arbitrator/sync_replica3_alterTable_drop.sim
+run unique/arbitrator/sync_replica3_dropDb.sim
+run unique/arbitrator/sync_replica3_dropTable.sim
diff --git a/tests/script/unique/column/replica3.sim b/tests/script/unique/column/replica3.sim
index c21f71dc2c..2d6c194ef8 100644
--- a/tests/script/unique/column/replica3.sim
+++ b/tests/script/unique/column/replica3.sim
@@ -29,8 +29,8 @@ while $x < 1010
$x = $x + 1
endw
-sql_error create database d1 replica 2 wal 0
-sql create database d2 replica 1 wal 0
+sql_error create database d1 replica 2 wallevel 0
+sql_error create database d2 replica 1 wallevel 0
sql_error alter database d2 replica 2
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/unique/db/replica_add12.sim b/tests/script/unique/db/replica_add12.sim
index ff0dffe0e2..89502ce419 100644
--- a/tests/script/unique/db/replica_add12.sim
+++ b/tests/script/unique/db/replica_add12.sim
@@ -148,25 +148,10 @@ print ========= step5
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
-sql select * from d1.t1
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d4.t4
-if $rows != 2 then
- return -1
-endi
+sql_error select * from d1.t1
+sql_error select * from d2.t2
+sql_error select * from d3.t3
+sql_error select * from d4.t4
print ===== insert data
@@ -175,26 +160,6 @@ sql_error insert into d2.t2 values(now, 3)
sql_error insert into d3.t3 values(now, 3)
sql_error insert into d4.t4 values(now, 3)
-sql select * from d1.t1
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql select * from d4.t4
-if $rows != 2 then
- return -1
-endi
-
print ========= step6
system sh/exec.sh -n dnode2 -s start
sleep 5000
@@ -234,25 +199,10 @@ sql_error insert into d2.t2 values(now, 3)
sql_error insert into d3.t3 values(now, 3)
sql_error insert into d4.t4 values(now, 3)
-sql select * from d1.t1
-if $rows != 3 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 3 then
- return -1
-endi
-
-sql select * from d3.t3
-if $rows != 3 then
- return -1
-endi
-
-sql select * from d4.t4
-if $rows != 3 then
- return -1
-endi
+sql_error select * from d1.t1
+sql_error select * from d2.t2
+sql_error select * from d3.t3
+sql_error select * from d4.t4
print ========= step7
system sh/exec.sh -n dnode3 -s start
diff --git a/tests/script/unique/db/replica_reduce32.sim b/tests/script/unique/db/replica_reduce32.sim
index bc08911a7c..5516009369 100644
--- a/tests/script/unique/db/replica_reduce32.sim
+++ b/tests/script/unique/db/replica_reduce32.sim
@@ -102,19 +102,15 @@ print ========= step4
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
-sql insert into d1.t1 values(now, 3) -x step1
-step1:
-sql insert into d2.t2 values(now, 3) -x step2
-step2:
-sql insert into d3.t3 values(now, 3) -x step3
-step3:
-sql insert into d4.t4 values(now, 3) -x step4
-step4:
+sql_error insert into d1.t1 values(now, 3)
+sql_error insert into d2.t2 values(now, 3)
+sql_error insert into d3.t3 values(now, 3)
+sql_error insert into d4.t4 values(now, 3)
-sql select * from d1.t1
-sql select * from d2.t2
-sql select * from d3.t3
-sql select * from d4.t4
+sql_error select * from d1.t1
+sql_error select * from d2.t2
+sql_error select * from d3.t3
+sql_error select * from d4.t4
print ========= step5
system sh/exec.sh -n dnode2 -s start
@@ -126,14 +122,10 @@ sleep 5000
sql reset query cache
sleep 1000
-sql insert into d1.t1 values(now, 3) -x step11
-step11:
-sql insert into d2.t2 values(now, 3) -x step21
-step21:
-sql insert into d3.t3 values(now, 3) -x step31
-step31:
-sql insert into d4.t4 values(now, 3) -x step41
-step41:
+sql_error insert into d1.t1 values(now, 3)
+sql_error insert into d2.t2 values(now, 3)
+sql_error insert into d3.t3 values(now, 3)
+sql_error insert into d4.t4 values(now, 3)
print ========= step6
system sh/exec.sh -n dnode3 -s start
diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim
index 61ce5127b4..58f6609d15 100644
--- a/tests/script/unique/http/opentsdb.sim
+++ b/tests/script/unique/http/opentsdb.sim
@@ -125,7 +125,7 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":-2147483135}}],"failed":1,"success":0,"affected_rows":0}@ then
+if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":-2147482782}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
diff --git a/tests/script/unique/stable/dnode2_stop.sim b/tests/script/unique/stable/dnode2_stop.sim
index 0a801d9c7e..cb7df5a3cf 100644
--- a/tests/script/unique/stable/dnode2_stop.sim
+++ b/tests/script/unique/stable/dnode2_stop.sim
@@ -74,6 +74,7 @@ endi
sleep 100
system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 5000
print =============== step2
sql select count(*) from $mt -x step2
@@ -85,7 +86,7 @@ sql select count(tbcol) from $mt -x step21
step21:
system sh/exec.sh -n dnode2 -s start
-sleep 10000
+sleep 5000
print =============== step3
sql select count(tbcol) as c from $mt where ts <= 1519833840000