diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index b99a8a46d0..f687d7f244 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -176,8 +176,6 @@ SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);
-SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
-
int32_t tscValidateName(SSQLToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 6ce94d5aa4..c8754e5beb 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -32,8 +32,8 @@ extern "C" {
#include "qExecutor.h"
#include "qsqlparser.h"
-#include "qsqltype.h"
#include "qtsbuf.h"
+#include "tcmdtype.h"
// forward declaration
struct SSqlInfo;
@@ -395,7 +395,6 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
void *param, void **taos);
void waitForQueryRsp(void *param, TAOS_RES *tres, int code) ;
-int doAsyncParseSql(SSqlObj* pSql);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, void (*fp)(), void *param, const char *sqlstr, size_t sqlLen);
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql);
@@ -403,13 +402,14 @@ void tscKillSTableQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
+
+// todo remove this function.
bool tscResultsetFetchCompleted(TAOS_RES *result);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
-void tscQueueAsyncFreeResult(SSqlObj *pSql);
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column);
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 2de45bcc6e..3fed3e4d67 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -213,27 +213,34 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
// handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
- } else if (pRes->completed && pCmd->command == TSDB_SQL_FETCH) {
- if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
- tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
- return;
- } else {
- /*
+ } else if (pRes->completed) {
+ if(pCmd->command == TSDB_SQL_FETCH) {
+ if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
+ tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
+ return;
+ } else {
+ /*
* all available virtual node has been checked already, now we need to check
* for the next subclause queries
- */
- if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
- tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
- return;
- }
-
- /*
+ */
+ if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
+ tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
+ return;
+ }
+
+ /*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
- */
+ */
+ (*pSql->fetchFp)(param, pSql, 0);
+ }
+ return;
+ } else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE) {
+ // in case of show command, return no data
(*pSql->fetchFp)(param, pSql, 0);
+ } else {
+ assert(0);
}
- return;
} else { // current query is not completed, continue retrieve from node
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
@@ -405,17 +412,6 @@ void tscProcessAsyncFree(SSchedMsg *pMsg) {
taos_free_result(pSql);
}
-void tscQueueAsyncFreeResult(SSqlObj *pSql) {
- tscDebug("%p sqlObj put in queue to async free", pSql);
-
- SSchedMsg schedMsg = { 0 };
- schedMsg.fp = tscProcessAsyncFree;
- schedMsg.ahandle = pSql;
- schedMsg.thandle = (void *)1;
- schedMsg.msg = NULL;
- taosScheduleTask(tscQhandle, &schedMsg);
-}
-
int tscSendMsgToServer(SSqlObj *pSql);
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 83700ce0a5..1d66fb0467 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -406,7 +406,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pSql->res.qhandle = 0x1;
pSql->res.numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle,false);
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
tscProcessServerVer(pSql);
} else if (pCmd->command == TSDB_SQL_CLI_VERSION) {
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 9f557f5529..2b325afa7c 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -62,34 +62,34 @@ static int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pD
static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength);
static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName);
-static int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult);
+static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult);
static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
int8_t type, char* fieldName, SSqlExpr* pSqlExpr);
static int32_t changeFunctionID(int32_t optr, int16_t* functionId);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable);
static bool validateIpAddress(const char* ip, size_t size);
-static bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo);
+static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo);
static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd);
-static int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
-static int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
-static int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
+static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql);
-static int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL);
-static int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema);
+static int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL);
+static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema);
-static int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo);
+static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
-static int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo);
+static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString);
-static int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo);
-static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
+static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
+static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
static int32_t validateDNodeConfig(tDCLSQL* pOptions);
static int32_t validateLocalConfig(tDCLSQL* pOptions);
static int32_t validateColumnName(char* name);
@@ -98,15 +98,15 @@ static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killTy
static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
-static int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
+static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
-static int32_t getColumnIndexByName(const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t optrToString(tSQLExpr* pExpr, char** exprString);
static int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
-static int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate);
static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex);
@@ -115,7 +115,7 @@ static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSql
static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index);
-static int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols);
+static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols);
/*
* Used during parsing query sql. Since the query sql usually small in length, error position
@@ -125,7 +125,7 @@ static int32_t invalidSqlErrMsg(char* dstBuffer, const char* errMsg) {
return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL);
}
-static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVar) {
+static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
int64_t time = 0;
const char* msg = "invalid timestamp";
@@ -137,11 +137,11 @@ static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVa
if (seg != NULL) {
if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
} else {
if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT, true)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
@@ -563,7 +563,7 @@ static bool isTopBottomQuery(SQueryInfo* pQueryInfo) {
return false;
}
-int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "invalid query expression";
const char* msg2 = "interval cannot be less than 10 ms";
@@ -590,12 +590,12 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
// interval cannot be less than 10 milliseconds
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
if (isTopBottomQuery(pQueryInfo)) {
- if (parseSlidingClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -610,7 +610,7 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
for (int32_t i = 0; i < size; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -619,7 +619,7 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
* select tbname, tags_fields from super_table_name interval(1s)
*/
if (tscQueryTags(pQueryInfo) && pQueryInfo->intervalTime > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// need to add timestamp column in result set, if interval is existed
@@ -644,14 +644,14 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
- if (parseSlidingClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
return TSDB_CODE_SUCCESS;
}
-int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
@@ -666,11 +666,11 @@ int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
}
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
@@ -1124,12 +1124,12 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
// select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
- if (addProjectionExprAndResultField(pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
+ if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
} else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_TBID) {
// sql function in selection clause, append sql function info in pSqlCmd structure sequentially
- if (addExprAndResultField(pQueryInfo, outputIndex, pItem, true) != TSDB_CODE_SUCCESS) {
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, pItem, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1138,8 +1138,8 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
SColumnList columnList = {0};
int32_t arithmeticType = NON_ARITHMEIC_EXPR;
- if (validateArithmeticSQLExpr(pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t tableIndex = columnList.ids[0].tableIndex;
@@ -1152,7 +1152,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
// all columns in arithmetic expression must belong to the same table
for (int32_t f = 1; f < columnList.num; ++f) {
if (columnList.ids[f].tableIndex != tableIndex) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
@@ -1172,10 +1172,10 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
tExprNode* pNode = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
- int32_t ret = exprTreeFromSqlExpr(&pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList);
if (ret != TSDB_CODE_SUCCESS) {
tExprTreeDestroy(&pNode, NULL);
- return invalidSqlErrMsg(pQueryInfo->msg, "invalid arithmetic expression in select clause");
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid arithmetic expression in select clause");
}
SBufferWriter bw = tbufInitWriter(NULL, false);
@@ -1215,10 +1215,10 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
pArithExprInfo->interBytes = sizeof(double);
pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE;
- int32_t ret = exprTreeFromSqlExpr(&pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
if (ret != TSDB_CODE_SUCCESS) {
tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
- return invalidSqlErrMsg(pQueryInfo->msg, "invalid expression in select clause");
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
}
pInfo->pArithExprInfo = pArithExprInfo;
@@ -1229,7 +1229,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
* not support such expression
* e.g., select 12+5 from table_name
*/
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->fieldsInfo.numOfOutput > TSDB_MAX_COLUMNS) {
@@ -1238,7 +1238,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
}
if (!functionCompatibleCheck(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
/*
@@ -1248,7 +1248,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
if (isSTable) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
- if (hasUnsupportFunctionsForSTableQuery(pQueryInfo)) {
+ if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -1373,7 +1373,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
return numOfTotalColumns;
}
-int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
+int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
const char* msg0 = "invalid column name";
const char* msg1 = "tag for normal table query is not allowed";
@@ -1382,7 +1382,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
if (pItem->pNode->nSQLOptr == TK_ALL) { // project on all fields
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
// all meters columns are required
@@ -1398,8 +1398,8 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
} else if (pItem->pNode->nSQLOptr == TK_ID) { // simple column projection query
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ if (getColumnIndexByName(pCmd, &pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
@@ -1410,7 +1410,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
addProjectQueryCol(pQueryInfo, startPos, &index, pItem);
@@ -1422,7 +1422,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
return TSDB_CODE_SUCCESS;
}
-static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
+static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
int32_t resColIdx, SColumnIndex* pColIndex) {
int16_t type = 0;
int16_t bytes = 0;
@@ -1434,7 +1434,7 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_NCHAR ||
pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BOOL) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return -1;
} else {
type = TSDB_DATA_TYPE_DOUBLE;
@@ -1471,7 +1471,7 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
return TSDB_CODE_SUCCESS;
}
-int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) {
+int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t optr = pItem->pNode->nSQLOptr;
@@ -1489,7 +1489,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
case TK_COUNT: {
if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) {
/* more than one parameter for count() function */
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
int16_t functionID = 0;
@@ -1503,7 +1503,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
if (pItem->pNode->pParam != NULL) {
SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
if (pToken->z == NULL || pToken->n == 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
@@ -1513,7 +1513,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSQLToken tmpToken = pParamElem->pNode->colInfo;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
@@ -1521,8 +1521,8 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
} else {
// count the number of meters created according to the super table
- if (getColumnIndexByName(pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1583,18 +1583,18 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
if (pItem->pNode->pParam == NULL || (optr != TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 1) ||
(optr == TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 3)) {
/* no parameters or more than one parameter for function */
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]);
if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) ||
+ if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) ||
index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// 2. check if sql function can be applied on this column data type
@@ -1603,7 +1603,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int16_t colType = pSchema->type;
if (colType <= TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int16_t resultType = 0;
@@ -1633,7 +1633,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to tags
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, resultSize, false);
@@ -1685,23 +1685,23 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int16_t functionID = 0;
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
if (!requireAllFields) {
if (pItem->pNode->pParam->nExpr < 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pItem->pNode->pParam->nExpr > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
/* in first/last function, multiple columns can be add to resultset */
for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) {
tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]);
if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
@@ -1711,7 +1711,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSQLToken tmpToken = pParamElem->pNode->colInfo;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1719,14 +1719,14 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
index.columnIndex = j;
- if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
} else {
- if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1734,10 +1734,10 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to tags
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1765,7 +1765,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// multicolumn selection does not support alias name
if (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
@@ -1774,7 +1774,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
- if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i + j, &index) !=
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i + j, &index) !=
0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1794,17 +1794,17 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// 1. valid the number of parameters
if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 2) {
/* no parameters or more than one parameter for function */
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]);
if (pParamElem->pNode->nSQLOptr != TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1812,18 +1812,18 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to tags
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. valid the column type
int16_t colType = pSchema[index.columnIndex].type;
if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// 3. valid the parameters
if (pParamElem[1].pNode->nSQLOptr == TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tVariant* pVariant = &pParamElem[1].pNode->val;
@@ -1839,7 +1839,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
double dp = GET_DOUBLE_VAL(val);
if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
resultSize = sizeof(double);
@@ -1862,7 +1862,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int64_t nTop = GET_INT32_VAL(val);
if (nTop <= 0 || nTop > 100) { // todo use macro
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
int16_t functionId = 0;
@@ -1906,19 +1906,19 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
case TK_TBID: {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// no parameters or more than one parameter for function
if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSQLExpr* pParam = pItem->pNode->pParam->a[0].pNode;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, &pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1927,7 +1927,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to normal columns
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (index.columnIndex > 0) {
@@ -1943,7 +1943,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
}
if (colType == TSDB_DATA_TYPE_BOOL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
@@ -2036,7 +2036,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken
return columnIndex;
}
-int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
@@ -2052,7 +2052,7 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColum
if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
} else {
pIndex->tableIndex = i;
pIndex->columnIndex = colIndex;
@@ -2067,7 +2067,7 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColum
}
if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -2118,7 +2118,7 @@ int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIn
return TSDB_CODE_SUCCESS;
}
-int32_t getColumnIndexByName(const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t getColumnIndexByName(SSqlCmd* pCmd, const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2129,7 +2129,7 @@ int32_t getColumnIndexByName(const SSQLToken* pToken, SQueryInfo* pQueryInfo, SC
return TSDB_CODE_TSC_INVALID_SQL;
}
- return doGetColumnIndexByName(&tmpToken, pQueryInfo, pIndex);
+ return doGetColumnIndexByName(pCmd, &tmpToken, pQueryInfo, pIndex);
}
int32_t changeFunctionID(int32_t optr, int16_t* functionId) {
@@ -2412,7 +2412,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}
}
-bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo) {
+bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "TWA not allowed to apply to super table directly";
const char* msg2 = "TWA only support group by tbname for super table query";
const char* msg3 = "function not support for super table query";
@@ -2422,24 +2422,24 @@ bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < size; ++i) {
int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_STABLE) == 0) {
- invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
}
}
if (tscIsTWAQuery(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) {
- invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
} else {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
- invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
}
}
@@ -2506,7 +2506,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
pQueryInfo->groupbyExpr.numOfGroupCols = pList->nExpr;
if (pList->nExpr > TSDB_MAX_TAGS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMeta* pTableMeta = NULL;
@@ -2520,8 +2520,8 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
SSQLToken token = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ if (getColumnIndexByName(pCmd, &token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tableIndex = index.tableIndex;
@@ -2548,7 +2548,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
if (groupTag) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
int32_t relIndex = index.columnIndex;
@@ -2564,7 +2564,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
} else {
// check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by
if (pSchema->type > TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
tscColumnListInsert(pQueryInfo->colList, &index);
@@ -2574,7 +2574,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
if (i == 0 && pList->nExpr > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
}
@@ -2610,7 +2610,7 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
return pColFilterInfo;
}
-static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
+static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
SColumnIndex* columnIndex, tSQLExpr* pExpr) {
const char* msg = "not supported filter condition";
@@ -2625,7 +2625,7 @@ static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterIn
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
colType = TSDB_DATA_TYPE_DOUBLE;
} else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BINARY == pRight->val.nType)) {
- int retVal = setColumnFilterInfoForTimestamp(pQueryInfo, &pRight->val);
+ int retVal = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, &pRight->val);
if (TSDB_CODE_SUCCESS != retVal) {
return retVal;
}
@@ -2675,7 +2675,7 @@ static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterIn
pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE;
break;
default:
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
return TSDB_CODE_SUCCESS;
@@ -2859,7 +2859,7 @@ enum {
TSQL_EXPR_TBNAME = 3,
};
-static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) {
+static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@@ -2895,22 +2895,22 @@ static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIn
if (pColFilter->filterstr) {
if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->nSQLOptr == TK_LIKE) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pSchema->type == TSDB_DATA_TYPE_BOOL) {
if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
pColumn->colIndex = *pIndex;
- return doExtractColumnFilterInfo(pQueryInfo, pColFilter, pIndex, pExpr);
+ return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr);
}
static void relToString(tSQLExpr* pExpr, char** str) {
@@ -2957,7 +2957,7 @@ static int32_t getTagCondString(tSQLExpr* pExpr, char** str) {
return tSQLExprLeafToString(pExpr, true, str);
}
-static int32_t getTablenameCond(SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) {
+static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) {
const char* msg0 = "invalid table name list";
if (pTableCond == NULL) {
@@ -2980,35 +2980,35 @@ static int32_t getTablenameCond(SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SS
}
if (ret != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
return ret;
}
-static int32_t getColumnQueryCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t relOptr) {
+static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t relOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
if (!isExprDirectParentOfLeaftNode(pExpr)) { // internal node
- int32_t ret = getColumnQueryCondInfo(pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr);
+ int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- return getColumnQueryCondInfo(pQueryInfo, pExpr->pRight, pExpr->nSQLOptr);
+ return getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pRight, pExpr->nSQLOptr);
} else { // handle leaf node
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- return extractColumnFilterInfo(pQueryInfo, &index, pExpr, relOptr);
+ return extractColumnFilterInfo(pCmd, pQueryInfo, &index, pExpr, relOptr);
}
}
-static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
+static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg1 = "invalid join query condition";
const char* msg2 = "join on binary/nchar not supported";
const char* msg3 = "type of join columns must be identical";
@@ -3019,7 +3019,7 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
}
if (!isExprDirectParentOfLeaftNode(pExpr)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STagCond* pTagCond = &pQueryInfo->tagCond;
@@ -3027,8 +3027,8 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
SJoinNode* pRight = &pTagCond->joinInfo.right;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3039,8 +3039,8 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
strcpy(pLeft->tableId, pTableMetaInfo->name);
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3051,11 +3051,11 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
strcpy(pRight->tableId, pTableMetaInfo->name);
if (pTagSchema1->type != pTagSchema2->type) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pTagSchema1->type == TSDB_DATA_TYPE_BINARY || pTagSchema1->type == TSDB_DATA_TYPE_NCHAR) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pTagCond->joinInfo.hasJoin = true;
@@ -3094,7 +3094,7 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) {
return TSDB_CODE_SUCCESS;
}
-static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
+static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
if (pExpr->nSQLOptr == TK_ID) {
if (*type == NON_ARITHMEIC_EXPR) {
*type = NORMAL_ARITHMETIC;
@@ -3103,7 +3103,7 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnL
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3131,7 +3131,7 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnL
tSQLExprItem item = {.pNode = pExpr, .aliasName = NULL};
// sql function in selection clause, append sql function info in pSqlCmd structure sequentially
- if (addExprAndResultField(pQueryInfo, outputIndex, &item, false) != TSDB_CODE_SUCCESS) {
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -3139,19 +3139,19 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnL
return TSDB_CODE_SUCCESS;
}
-static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
+static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
tSQLExpr* pLeft = pExpr->pLeft;
if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) {
- int32_t ret = validateArithmeticSQLExpr(pLeft, pQueryInfo, pList, type);
+ int32_t ret = validateArithmeticSQLExpr(pCmd, pLeft, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
} else {
- int32_t ret = validateSQLExpr(pLeft, pQueryInfo, pList, type);
+ int32_t ret = validateSQLExpr(pCmd, pLeft, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3159,12 +3159,12 @@ static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo
tSQLExpr* pRight = pExpr->pRight;
if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) {
- int32_t ret = validateArithmeticSQLExpr(pRight, pQueryInfo, pList, type);
+ int32_t ret = validateArithmeticSQLExpr(pCmd, pRight, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
} else {
- int32_t ret = validateSQLExpr(pRight, pQueryInfo, pList, type);
+ int32_t ret = validateSQLExpr(pCmd, pRight, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3243,7 +3243,7 @@ static void exchangeExpr(tSQLExpr* pExpr) {
}
}
-static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) {
+static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) {
const char* msg1 = "illegal column name";
const char* msg2 = "= is expected in join expression";
const char* msg3 = "join column must have same type";
@@ -3257,14 +3257,14 @@ static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColum
}
if (pExpr->nSQLOptr != TK_EQ) {
- invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return false;
}
@@ -3278,16 +3278,16 @@ static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColum
int16_t rightType = pRightSchema[rightIndex.columnIndex].type;
if (leftType != rightType) {
- invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
} else if (pLeftIndex->tableIndex == rightIndex.tableIndex) {
- invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
return false;
}
// table to table/ super table to super table are allowed
if (UTIL_TABLE_IS_SUPER_TABLE(pLeftMeterMeta) != UTIL_TABLE_IS_SUPER_TABLE(pRightMeterMeta)) {
- invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;
}
@@ -3320,8 +3320,8 @@ static int32_t setExprToCond(tSQLExpr** parent, tSQLExpr* pExpr, const char* msg
return TSDB_CODE_SUCCESS;
}
-static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
- int32_t parentOptr) {
+static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr,
+ int32_t* type, int32_t parentOptr) {
const char* msg1 = "table query cannot use tags filter";
const char* msg2 = "illegal column name";
const char* msg3 = "only one query time range allowed";
@@ -3337,8 +3337,8 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
int32_t ret = TSDB_CODE_SUCCESS;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
assert(isExprDirectParentOfLeaftNode(*pExpr));
@@ -3347,7 +3347,7 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
- if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) {
+ if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3370,31 +3370,31 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
} else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
// query on tags, check for tag query condition
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// check for like expression
if ((*pExpr)->nSQLOptr == TK_LIKE) {
if (pRight->val.nLen > TSDB_PATTERN_STRING_MAX_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
if ((!isTablenameToken(&pLeft->colInfo)) && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_BINARY &&
pSchema[index.columnIndex].type != TSDB_DATA_TYPE_NCHAR) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
// in case of in operator, keep it in a separate attribute
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
if (!validTableNameOptr(*pExpr)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pCondExpr->pTableCond == NULL) {
@@ -3402,19 +3402,19 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
pCondExpr->relType = parentOptr;
pCondExpr->tableCondIndex = index.tableIndex;
} else {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
*type = TSQL_EXPR_TBNAME;
*pExpr = NULL;
} else {
if (pRight->nSQLOptr == TK_ID) { // join on tag columns for stable query
- if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) {
+ if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pCondExpr->pJoinExpr != NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
@@ -3433,7 +3433,7 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
*type = TSQL_EXPR_COLUMN;
if (pRight->nSQLOptr == TK_ID) { // other column cannot be served as the join column
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg);
@@ -3443,8 +3443,8 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
return ret;
}
-int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
- int32_t parentOptr) {
+int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr,
+ int32_t* type, int32_t parentOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -3462,12 +3462,12 @@ int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pC
int32_t rightType = -1;
if (!isExprDirectParentOfLeaftNode(*pExpr)) {
- int32_t ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
+ int32_t ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr);
+ ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3478,7 +3478,7 @@ int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pC
*/
if (leftType != rightType) {
if ((*pExpr)->nSQLOptr == TK_OR && (leftType + rightType != TSQL_EXPR_TBNAME + TSQL_EXPR_TAG)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -3488,7 +3488,7 @@ int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pC
exchangeExpr(*pExpr);
- return handleExprInQueryCond(pQueryInfo, pExpr, pCondExpr, type, parentOptr);
+ return handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, parentOptr);
}
static void doCompactQueryExpr(tSQLExpr** pExpr) {
@@ -3522,12 +3522,12 @@ static void doCompactQueryExpr(tSQLExpr** pExpr) {
}
}
-static void doExtractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) {
+static void doExtractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) {
if (isExprDirectParentOfLeaftNode(*pExpr)) {
tSQLExpr* pLeft = (*pExpr)->pLeft;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return;
}
@@ -3544,16 +3544,16 @@ static void doExtractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQ
} else {
*pOut = tSQLExprCreate(NULL, NULL, (*pExpr)->nSQLOptr);
- doExtractExprForSTable(&(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex);
- doExtractExprForSTable(&(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex);
+ doExtractExprForSTable(pCmd, &(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex);
+ doExtractExprForSTable(pCmd, &(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex);
}
}
-static tSQLExpr* extractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex) {
+static tSQLExpr* extractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex) {
tSQLExpr* pResExpr = NULL;
if (*pExpr != NULL) {
- doExtractExprForSTable(pExpr, pQueryInfo, &pResExpr, tableIndex);
+ doExtractExprForSTable(pCmd, pExpr, pQueryInfo, &pResExpr, tableIndex);
doCompactQueryExpr(&pResExpr);
}
@@ -3573,8 +3573,8 @@ int tableNameCompar(const void* lhs, const void* rhs) {
return ret > 0 ? 1 : -1;
}
-static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* account, tSQLExpr* pExpr,
- int16_t tableCondIndex, SStringBuilder* sb) {
+static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, const char* account,
+ tSQLExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) {
const char* msg = "table name too long";
if (pExpr == NULL) {
@@ -3631,7 +3631,7 @@ static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* ac
taosStringBuilderDestroy(&sb1);
tfree(segments);
- invalidSqlErrMsg(pQueryInfo->msg, msg);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
return ret;
}
@@ -3674,7 +3674,7 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) {
return true;
}
-static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
+static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg0 = "invalid timestamp";
const char* msg1 = "only one time stamp window allowed";
@@ -3684,15 +3684,15 @@ static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
if (!isExprDirectParentOfLeaftNode(pExpr)) {
if (pExpr->nSQLOptr == TK_OR) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- getTimeRangeFromExpr(pQueryInfo, pExpr->pLeft);
+ getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
- return getTimeRangeFromExpr(pQueryInfo, pExpr->pRight);
+ return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight);
} else {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3703,7 +3703,7 @@ static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
if (getTimeRange(&win, pRight, pExpr->nSQLOptr, tinfo.precision) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
// update the timestamp query range
@@ -3719,7 +3719,7 @@ static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
return TSDB_CODE_SUCCESS;
}
-static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
+static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
const char* msg1 = "super table join requires tags column";
const char* msg2 = "timestamp join condition missing";
const char* msg3 = "condition missing for join query";
@@ -3728,7 +3728,7 @@ static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
if (pQueryInfo->numOfTables == 1) {
return TSDB_CODE_SUCCESS;
} else {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -3736,12 +3736,12 @@ static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // for stable join, tag columns
// must be present for join
if (pCondExpr->pJoinExpr == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (!pCondExpr->tsJoin) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
@@ -3769,12 +3769,12 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
}
}
-static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
+static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SColumnIndex index = {0};
- if (getColumnIndexByName(&pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (left)", pQueryInfo);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3782,7 +3782,7 @@ static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* p
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
- if (getColumnIndexByName(&pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (right)", pQueryInfo);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3792,7 +3792,7 @@ static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* p
}
}
-static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) {
+static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) {
int32_t ret = TSDB_CODE_SUCCESS;
if (pCondExpr->pTagCond == NULL) {
@@ -3800,7 +3800,7 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
}
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
- tSQLExpr* p1 = extractExprForSTable(pExpr, pQueryInfo, i);
+ tSQLExpr* p1 = extractExprForSTable(pCmd, pExpr, pQueryInfo, i);
if (p1 == NULL) { // no query condition on this table
continue;
}
@@ -3808,7 +3808,7 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
tExprNode* p = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
- ret = exprTreeFromSqlExpr(&p, p1, NULL, pQueryInfo, colList);
+ ret = exprTreeFromSqlExpr(pCmd, &p, p1, NULL, pQueryInfo, colList);
SBufferWriter bw = tbufInitWriter(NULL, false);
TRY(0) {
@@ -3859,11 +3859,11 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
SCondExpr condExpr = {0};
if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
int32_t type = 0;
- if ((ret = getQueryCondExpr(pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3873,46 +3873,46 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
condExpr.pTagCond = (*pExpr);
// 1. check if it is a join query
- if ((ret = validateJoinExpr(pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = validateJoinExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 2. get the query time range
- if ((ret = getTimeRangeFromExpr(pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 3. get the tag query condition
- if ((ret = getTagQueryCondExpr(pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 4. get the table name query condition
- if ((ret = getTablenameCond(pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 5. other column query condition
- if ((ret = getColumnQueryCondInfo(pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 6. join condition
- if ((ret = getJoinCondInfo(pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 7. query condition for table name
pQueryInfo->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR;
- ret = setTableCondForSTableQuery(pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
+ ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
taosStringBuilderDestroy(&sb);
if (!validateFilterExpr(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
- doAddJoinTagsColumnsIntoTagList(pQueryInfo, &condExpr);
+ doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr);
cleanQueryExpr(&condExpr);
return ret;
@@ -4007,7 +4007,7 @@ int32_t getTimeRange(STimeWindow* win, tSQLExpr* pRight, int32_t optr, int16_t t
}
// todo error !!!!
-int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
+int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char rep[] = {'(', ')', '*', ',', '.', '/', '\\', '+', '-', '%', ' '};
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
@@ -4030,7 +4030,7 @@ int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
for (int32_t j = i + 1; j < pQueryInfo->fieldsInfo.numOfOutput; ++j) {
if (strncasecmp(fieldName, tscFieldInfoGetField(&pQueryInfo->fieldsInfo, j)->name, (TSDB_COL_NAME_LEN - 1)) == 0) {
const char* msg = "duplicated column name in new table";
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
}
@@ -4038,7 +4038,7 @@ int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
+int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
tVariantList* pFillToken = pQuerySQL->fillType;
tVariantListItem* pItem = &pFillToken->a[0];
@@ -4049,7 +4049,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
const char* msg2 = "invalid fill option";
if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@@ -4081,7 +4081,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
pQueryInfo->fillType = TSDB_FILL_SET_VALUE;
if (pFillToken->nExpr == 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t startPos = 1;
@@ -4110,7 +4110,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
@@ -4128,7 +4128,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
}
}
} else {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
@@ -4152,7 +4152,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
}
}
-int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) {
+int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) {
const char* msg0 = "only support order by primary timestamp";
const char* msg1 = "invalid column name";
const char* msg2 = "only support order by primary timestamp and queried column";
@@ -4175,11 +4175,11 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
*/
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
if (pSortorder->nExpr > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
} else {
if (pSortorder->nExpr > 2) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -4195,8 +4195,8 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
SColumnIndex index = {0};
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
- if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
bool orderByTags = false;
@@ -4207,7 +4207,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
@@ -4222,7 +4222,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
}
if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
} else {
assert(!(orderByTags && orderByTS));
}
@@ -4238,7 +4238,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
pExpr = tscSqlExprGet(pQueryInfo, 1);
if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder;
@@ -4261,12 +4261,12 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
tVariant* pVar2 = &pSortorder->a[1].pVar;
SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
- if (getColumnIndexByName(&cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
} else {
pQueryInfo->order.order = pSortorder->a[1].sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@@ -4274,12 +4274,12 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
}
} else { // meter query
- if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (isTopBottomQuery(pQueryInfo)) {
@@ -4289,7 +4289,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
pExpr = tscSqlExprGet(pQueryInfo, 1);
if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder;
@@ -4335,11 +4335,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
if (tscValidateName(&(pAlterSQL->name)) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo);
@@ -4352,19 +4352,19 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
tFieldList* pFieldList = pAlterSQL->pAddColumns;
if (pFieldList->nField > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (!validateOneTags(pCmd, &pFieldList->p[0])) {
@@ -4374,31 +4374,31 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[0]);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) {
if (tscGetNumOfTags(pTableMeta) == 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// numOfTags == 1
if (pAlterSQL->varList->nExpr > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
tVariantListItem* pItem = &pAlterSQL->varList->a[0];
if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
SSQLToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(&name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex < numOfCols) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg10);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
} else if (index.columnIndex == numOfCols) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg11);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
char name1[128] = {0};
@@ -4416,23 +4416,23 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pDstItem = &pAlterSQL->varList->a[1];
if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg10);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
}
SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER;
SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER;
SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -4452,20 +4452,21 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantList* pVarList = pAlterSQL->varList;
tVariant* pTagName = &pVarList->a[0].pVar;
+ int16_t numOfTags = tscGetNumOfTags(pTableMeta);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SSQLToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen};
- if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMeta)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg12);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg12);
}
SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg13);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg13);
}
pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
@@ -4473,10 +4474,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
(pVarList->a[1].pVar.nLen + VARSTR_HEADER_SIZE) > pTagsSchema->bytes) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg14);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg14);
}
-
- int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + TSDB_EXTRA_PAYLOAD_SIZE;
+
+ int32_t schemaLen = sizeof(STColumn) * numOfTags;
+ int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
+
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -4487,29 +4490,43 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
- pUpdateMsg->type = htons(pTagsSchema->type);
- pUpdateMsg->bytes = htons(pTagsSchema->bytes);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
-
- tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data, pTagsSchema->type, true);
+ pUpdateMsg->numOfTags = htons(numOfTags);
+ pUpdateMsg->schemaLen = htonl(schemaLen);
+
+ // the schema is located after the msg body, then followed by true tag value
+ char* d = pUpdateMsg->data;
+ SSchema* pTagCols = tscGetTableTagSchema(pTableMeta);
+ for (int i = 0; i < numOfTags; ++i) {
+ STColumn* pCol = (STColumn*) d;
+ pCol->colId = htons(pTagCols[i].colId);
+ pCol->bytes = htons(pTagCols[i].bytes);
+ pCol->type = pTagCols[i].type;
+ pCol->offset = 0;
+
+ d += sizeof(STColumn);
+ }
+
+ // copy the tag value to msg body
+ tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true);
int32_t len = 0;
if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) {
len = tDataTypeDesc[pTagsSchema->type].nSize;
} else {
- len = varDataTLen(pUpdateMsg->data);
+ len = varDataTLen(pUpdateMsg->data + schemaLen);
}
pUpdateMsg->tagValLen = htonl(len); // length may be changed after dump data
- int32_t total = sizeof(SUpdateTableTagValMsg) + len;
+ int32_t total = sizeof(SUpdateTableTagValMsg) + len + schemaLen;
pUpdateMsg->head.contLen = htonl(total);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
tFieldList* pFieldList = pAlterSQL->pAddColumns;
if (pFieldList->nField > 1) {
const char* msg = "only support add one column";
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (!validateOneColumn(pCmd, &pFieldList->p[0])) {
@@ -4530,12 +4547,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SSQLToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen};
- if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg17);
+ if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17);
}
if (columnIndex.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg18);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg18);
}
char name1[TSDB_COL_NAME_LEN] = {0};
@@ -4547,26 +4564,26 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_SUCCESS;
}
-int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo) {
+int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId;
if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
return TSDB_CODE_SUCCESS;
}
-int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo) {
+int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
bool isProjectionFunction = false;
const char* msg1 = "column projection is not compatible with interval";
@@ -4599,7 +4616,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo) {
}
if (isProjectionFunction) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return isProjectionFunction == true ? TSDB_CODE_TSC_INVALID_SQL : TSDB_CODE_SUCCESS;
@@ -4741,7 +4758,7 @@ bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo) {
return (pQueryInfo->window.skey == pQueryInfo->window.ekey) && (pQueryInfo->window.skey != 0);
}
-int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQuerySql, SSqlObj* pSql) {
+int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQuerySql, SSqlObj* pSql) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
const char* msg0 = "soffset/offset can not be less than 0";
@@ -4758,7 +4775,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset);
if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->limit.limit == 0) {
@@ -4772,7 +4789,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query
if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// for projection query on super table, all queries are subqueries
@@ -4825,7 +4842,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
}
} else {
if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
size_t size = taosArrayGetSize(pQueryInfo->exprList);
@@ -4843,7 +4860,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
}
if (hasTags && hasOtherFunc) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -5138,7 +5155,7 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
* 2. if selectivity function and tagprj function both exist, there should be only
* one selectivity function exists.
*/
-static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
+static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
const char* msg1 = "only one selectivity function allowed in presence of tags function";
const char* msg3 = "aggregation function should not be mixed up with projection";
@@ -5176,7 +5193,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
// When the tag projection function on tag column that is not in the group by clause, aggregation function and
// selectivity function exist in select clause is not allowed.
if (numOfAggregation > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
/*
@@ -5198,7 +5215,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
}
if (((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) && (functionId != TSDB_FUNC_LAST_ROW)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -5208,7 +5225,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
} else {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (numOfAggregation > 0 || numOfSelectivity > 0) {
@@ -5222,7 +5239,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
+static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg2 = "interval not allowed in group by normal column";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -5271,7 +5288,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
} else {
// if this query is "group by" normal column, interval is not allowed
if (pQueryInfo->intervalTime > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
bool hasGroupColumn = false;
@@ -5314,7 +5331,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
// check if all the tags prj columns belongs to the group by columns
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
updateTagPrjFunction(pQueryInfo);
- return doAddGroupbyColumnsOnDemand(pQueryInfo);
+ return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
}
// check all query functions in selection clause, multi-output functions are not allowed
@@ -5338,21 +5355,21 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
if (!qualified) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
- if (checkUpdateTagPrjFunctions(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (checkUpdateTagPrjFunctions(pQueryInfo, pCmd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5360,7 +5377,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
* group by tag function must be not changed the function name, otherwise, the group operation may fail to
* divide the subset of final result.
*/
- if (doAddGroupbyColumnsOnDemand(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5371,23 +5388,23 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
} else {
- return checkUpdateTagPrjFunctions(pQueryInfo);
+ return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
}
}
-int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "only one expression allowed";
const char* msg2 = "invalid expression in select clause";
const char* msg3 = "invalid function";
tSQLExprList* pExprList = pQuerySql->pSelection;
if (pExprList->nExpr != 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tSQLExpr* pExpr = pExprList->a[0].pNode;
if (pExpr->operand.z == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// TODO redefine the function
@@ -5417,7 +5434,7 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
pQueryInfo->command = TSDB_SQL_CLI_VERSION;break;
case 4:
pQueryInfo->command = TSDB_SQL_CURRENT_USER;break;
- default: { return invalidSqlErrMsg(pQueryInfo->msg, msg3); }
+ default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); }
}
SColumnIndex ind = {0};
@@ -5704,11 +5721,11 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
tVariant* pVar = &pSrcMeterName->a[0].pVar;
SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING};
if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (tscSetTableFullName(pTableMetaInfo, &srcToken, pSql) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
int32_t code = tscGetTableMeta(pSql, pTableMetaInfo);
@@ -5728,31 +5745,31 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// set interval value
- if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
- (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) {
+ (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
// set the created table[stream] name
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- if (tsRewriteFieldNameIfNecessary(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (tsRewriteFieldNameIfNecessary(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
- if (validateSqlFunctionInStreamSql(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (validateSqlFunctionInStreamSql(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5762,14 +5779,14 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->intervalTime == 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
tVariantListItem* pItem = &pQuerySql->fillType->a[0];
if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) {
if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) ||
(strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
}
@@ -5817,7 +5834,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
if (pQuerySql->from == NULL) {
assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL &&
pQuerySql->pSortOrder == NULL);
- return doLocalQueryProcess(pQueryInfo, pQuerySql);
+ return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql);
}
if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) {
@@ -5887,17 +5904,17 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
// set interval value
- if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
- (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) {
+ (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
// set order by info
- if (parseOrderbyClause(pQueryInfo, pQuerySql, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
+ if (parseOrderbyClause(pCmd, pQueryInfo, pQuerySql, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5932,7 +5949,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
if (!hasTimestampForPointInterpQuery(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// in case of join query, time range is required.
@@ -5944,7 +5961,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
}
- if ((code = parseLimitClause(pQueryInfo, index, pQuerySql, pSql)) != TSDB_CODE_SUCCESS) {
+ if ((code = parseLimitClause(pCmd, pQueryInfo, index, pQuerySql, pSql)) != TSDB_CODE_SUCCESS) {
return code;
}
@@ -5967,11 +5984,11 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
int64_t timeRange = labs(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
}
- int32_t ret = parseFillClause(pQueryInfo, pQuerySql);
+ int32_t ret = parseFillClause(pCmd, pQueryInfo, pQuerySql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -5980,19 +5997,19 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
return TSDB_CODE_SUCCESS; // Does not build query message here
}
-int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols) {
+int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols) {
tExprNode* pLeft = NULL;
tExprNode* pRight= NULL;
if (pSqlExpr->pLeft != NULL) {
- int32_t ret = exprTreeFromSqlExpr(&pLeft, pSqlExpr->pLeft, pExprInfo, pQueryInfo, pCols);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pExprInfo, pQueryInfo, pCols);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
}
if (pSqlExpr->pRight != NULL) {
- int32_t ret = exprTreeFromSqlExpr(&pRight, pSqlExpr->pRight, pExprInfo, pQueryInfo, pCols);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pExprInfo, pQueryInfo, pCols);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -6027,7 +6044,7 @@ int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray*
}
} else if (pSqlExpr->nSQLOptr == TK_ID) { // column name, normal column arithmetic expression
SColumnIndex index = {0};
- int32_t ret = getColumnIndexByName(&pSqlExpr->colInfo, pQueryInfo, &index);
+ int32_t ret = getColumnIndexByName(pCmd, &pSqlExpr->colInfo, pQueryInfo, &index);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index e85ade60e5..d73983e77c 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -14,8 +14,8 @@
*/
#include "os.h"
-#include "qsqltype.h"
#include "tcache.h"
+#include "tcmdtype.h"
#include "trpc.h"
#include "tscLocalMerge.h"
#include "tscLog.h"
@@ -46,10 +46,13 @@ static int32_t minMsgSize() { return tsRpcHeadSize + 100; }
static void tscSetDnodeIpList(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
SRpcIpSet* pIpList = &pSql->ipList;
+ pIpList->inUse = 0;
+ if (pVgroupInfo == NULL) {
+ pIpList->numOfIps = 0;
+ return;
+ }
pIpList->numOfIps = pVgroupInfo->numOfIps;
- pIpList->inUse = 0;
-
for(int32_t i = 0; i < pVgroupInfo->numOfIps; ++i) {
strcpy(pIpList->fqdn[i], pVgroupInfo->ipAddr[i].fqdn);
pIpList->port[i] = pVgroupInfo->ipAddr[i].port;
@@ -539,14 +542,18 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
int32_t index = pTableMetaInfo->vgroupIndex;
assert(index >= 0);
- pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
+ if (pTableMetaInfo->vgroupList->numOfVgroups > 0) {
+ pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
+ }
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups);
} else {
pVgroupInfo = &pTableMeta->vgroupInfo;
}
tscSetDnodeIpList(pSql, pVgroupInfo);
- pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
+ if (pVgroupInfo != NULL) {
+ pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
+ }
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableMeta->sid);
@@ -1943,7 +1950,7 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
}
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
return 0;
}
@@ -1989,7 +1996,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
}
}
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index f1d69fa261..82cc8cc225 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -148,7 +148,7 @@ void taos_init_imp() {
refreshTime = refreshTime < 10 ? 10 : refreshTime;
if (tscCacheHandle == NULL) {
- tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL);
+ tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "client");
}
tscDebug("client is initialized successfully");
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 26a81c597f..9b6eff7123 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -1115,31 +1115,6 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
return taosArrayGetP(pColumnList, i);
}
-SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
- if (numOfFilters == 0) {
- assert(src == NULL);
- return NULL;
- }
-
- SColumnFilterInfo* pFilter = calloc(1, numOfFilters * sizeof(SColumnFilterInfo));
-
- memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
- for (int32_t j = 0; j < numOfFilters; ++j) {
-
- if (pFilter[j].filterstr) {
- size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
- pFilter[j].pz = (int64_t) calloc(1, len);
-
- memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
- }
- }
-
- assert(src->filterstr == 0 || src->filterstr == 1);
- assert(!(src->lowerRelOptr == TSDB_RELATION_INVALID && src->upperRelOptr == TSDB_RELATION_INVALID));
-
- return pFilter;
-}
-
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
diff --git a/src/common/inc/qsqltype.h b/src/common/inc/tcmdtype.h
similarity index 97%
rename from src/common/inc/qsqltype.h
rename to src/common/inc/tcmdtype.h
index 6f6493d17c..90fb5bf478 100644
--- a/src/common/inc/qsqltype.h
+++ b/src/common/inc/tcmdtype.h
@@ -13,8 +13,8 @@
* along with this program. If not, see .
*/
-#ifndef TDENGINE_QSQLCMD_H
-#define TDENGINE_QSQLCMD_H
+#ifndef TDENGINE_TSQLMSGTYPE_H
+#define TDENGINE_TSQLMSGTYPE_H
#ifdef __cplusplus
extern "C" {
@@ -109,4 +109,4 @@ extern char *sqlCmd[];
}
#endif
-#endif // TDENGINE_QSQLCMD_H
+#endif // TDENGINE_TSQLMSGTYPE_H
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index baa212d8b7..2ed4b81204 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -50,8 +50,8 @@ extern "C" {
typedef struct {
int8_t type; // Column type
int16_t colId; // column ID
- int32_t bytes; // column bytes
- int32_t offset; // point offset in SDataRow after the header part
+ int16_t bytes; // column bytes
+ int16_t offset; // point offset in SDataRow after the header part
} STColumn;
#define colType(col) ((col)->type)
@@ -116,7 +116,7 @@ typedef struct {
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version);
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder);
void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version);
-int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int32_t bytes);
+int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Data row structure
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index e7927605cb..da42c064ec 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -33,6 +33,7 @@ extern int32_t tsStatusInterval;
extern int16_t tsNumOfVnodesPerCore;
extern int16_t tsNumOfTotalVnodes;
extern int32_t tsNumOfMnodes;
+extern int32_t tsEnableVnodeBak;
// common
extern int tsRpcTimer;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index d2008c9ff8..10d725db32 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -27,4 +27,6 @@ SSchema tGetTableNameColumnSchema();
bool tscValidateTableNameLength(size_t len);
+SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
+
#endif // TDENGINE_NAME_H
diff --git a/src/common/src/sqlcmdstr.c b/src/common/src/sqlcmdstr.c
index 8584ba7976..672106523e 100644
--- a/src/common/src/sqlcmdstr.c
+++ b/src/common/src/sqlcmdstr.c
@@ -15,4 +15,4 @@
#define TSDB_SQL_C
-#include "qsqltype.h"
+#include "tcmdtype.h"
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index e5cbcfd143..7e551759f9 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -43,7 +43,7 @@ int tdEncodeSchema(void **buf, STSchema *pSchema) {
STColumn *pCol = schemaColAt(pSchema, i);
tlen += taosEncodeFixedI8(buf, colType(pCol));
tlen += taosEncodeFixedI16(buf, colColId(pCol));
- tlen += taosEncodeFixedI32(buf, colBytes(pCol));
+ tlen += taosEncodeFixedI16(buf, colBytes(pCol));
}
return tlen;
@@ -65,10 +65,10 @@ void *tdDecodeSchema(void *buf, STSchema **pRSchema) {
for (int i = 0; i < numOfCols; i++) {
int8_t type = 0;
int16_t colId = 0;
- int32_t bytes = 0;
+ int16_t bytes = 0;
buf = taosDecodeFixedI8(buf, &type);
buf = taosDecodeFixedI16(buf, &colId);
- buf = taosDecodeFixedI32(buf, &bytes);
+ buf = taosDecodeFixedI16(buf, &bytes);
if (tdAddColToSchema(&schemaBuilder, type, colId, bytes) < 0) {
tdDestroyTSchemaBuilder(&schemaBuilder);
return NULL;
@@ -105,7 +105,7 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version) {
pBuilder->version = version;
}
-int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int32_t bytes) {
+int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes) {
if (!isValidDataType(type)) return -1;
if (pBuilder->nCols >= pBuilder->tCols) {
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 225f12a210..67c104878a 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -41,6 +41,8 @@ int32_t tsStatusInterval = 1; // second
int16_t tsNumOfVnodesPerCore = 8;
int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
int32_t tsNumOfMnodes = 3;
+int32_t tsEnableVnodeBak = 1;
+
// common
int32_t tsRpcTimer = 1000;
@@ -422,6 +424,16 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "vnodeBak";
+ cfg.ptr = &tsEnableVnodeBak;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 1;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "balance";
cfg.ptr = &tsEnableBalance;
cfg.valType = TAOS_CFG_VTYPE_INT32;
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 2514ed26e5..295015d466 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -49,4 +49,29 @@ SSchema tGetTableNameColumnSchema() {
bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
-}
\ No newline at end of file
+}
+
+SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
+ if (numOfFilters == 0) {
+ assert(src == NULL);
+ return NULL;
+ }
+
+ SColumnFilterInfo* pFilter = calloc(1, numOfFilters * sizeof(SColumnFilterInfo));
+
+ memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
+ for (int32_t j = 0; j < numOfFilters; ++j) {
+
+ if (pFilter[j].filterstr) {
+ size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
+ pFilter[j].pz = (int64_t) calloc(1, len);
+
+ memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
+ }
+ }
+
+ assert(src->filterstr == 0 || src->filterstr == 1);
+ assert(!(src->lowerRelOptr == TSDB_RELATION_INVALID && src->upperRelOptr == TSDB_RELATION_INVALID));
+
+ return pFilter;
+}
diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c
index 3e7e8525ef..a633968616 100644
--- a/src/cq/src/cqMain.c
+++ b/src/cq/src/cqMain.c
@@ -213,6 +213,8 @@ void cqDrop(void *handle) {
pObj->pStream = NULL;
cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
+ tdFreeSchema(pObj->pSchema);
+ free(pObj->sqlStr);
free(pObj);
pthread_mutex_unlock(&pContext->mutex);
diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c
index 9cf024ba83..10eb77058b 100644
--- a/src/dnode/src/dnodeMgmt.c
+++ b/src/dnode/src/dnodeMgmt.c
@@ -176,6 +176,7 @@ void dnodeCleanupMgmt() {
tsMgmtQset = NULL;
tsMgmtQueue = NULL;
+ vnodeCleanupResources();
}
void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
@@ -242,8 +243,14 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
int32_t vnode = atoi(de->d_name + 5);
if (vnode == 0) continue;
- vnodeList[*numOfVnodes] = vnode;
(*numOfVnodes)++;
+
+ if (*numOfVnodes >= TSDB_MAX_VNODES) {
+ dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES);
+ continue;
+ } else {
+ vnodeList[*numOfVnodes - 1] = vnode;
+ }
}
}
closedir(dir);
@@ -337,7 +344,7 @@ static int32_t dnodeOpenVnodes() {
void dnodeStartStream() {
int32_t vnodeList[TSDB_MAX_VNODES];
int32_t numOfVnodes = 0;
- int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
+ int32_t status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
@@ -352,15 +359,14 @@ void dnodeStartStream() {
}
static void dnodeCloseVnodes() {
- int32_t *vnodeList = (int32_t *)malloc(sizeof(int32_t) * TSDB_MAX_VNODES);
+ int32_t vnodeList[TSDB_MAX_VNODES];
int32_t numOfVnodes;
int32_t status;
- status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
+ status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
- free(vnodeList);
return;
}
@@ -368,7 +374,6 @@ static void dnodeCloseVnodes() {
vnodeClose(vnodeList[i]);
}
- free(vnodeList);
dInfo("total vnodes:%d are all closed", numOfVnodes);
}
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index 6bbb291b6a..acd92db598 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -98,11 +98,7 @@ void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) {
pHead->vgId = htonl(pHead->vgId);
pHead->contLen = htonl(pHead->contLen);
- if (pMsg->msgType == TSDB_MSG_TYPE_FETCH) {
- pVnode = vnodeGetVnode(pHead->vgId);
- } else {
- pVnode = vnodeAccquireVnode(pHead->vgId);
- }
+ pVnode = vnodeAccquireVnode(pHead->vgId);
if (pVnode == NULL) {
leftLen -= pHead->contLen;
@@ -179,24 +175,17 @@ void dnodeFreeVnodeRqueue(void *rqueue) {
// dynamically adjust the number of threads
}
-static void dnodeContinueExecuteQuery(void* pVnode, void* qhandle, SReadMsg *pMsg) {
+void dnodePutQhandleIntoReadQueue(void *pVnode, void *qhandle) {
SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg));
- pRead->rpcMsg = pMsg->rpcMsg;
- pRead->pCont = qhandle;
- pRead->contLen = 0;
pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY;
+ pRead->pCont = qhandle;
+ pRead->contLen = 0;
- taos_queue queue = vnodeGetRqueue(pVnode);
- taosWriteQitem(queue, TAOS_QTYPE_RPC, pRead);
+ taos_queue queue = vnodeAccquireRqueue(pVnode);
+ taosWriteQitem(queue, TAOS_QTYPE_QUERY, pRead);
}
void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
- if (code == TSDB_CODE_VND_ACTION_IN_PROGRESS) return;
- if (code == TSDB_CODE_VND_ACTION_NEED_REPROCESSED) {
- dnodeContinueExecuteQuery(pVnode, pRead->rspRet.qhandle, pRead);
- code = TSDB_CODE_SUCCESS;
- }
-
SRpcMsg rpcRsp = {
.handle = pRead->rpcMsg.handle,
.pCont = pRead->rspRet.rsp,
@@ -206,6 +195,12 @@ void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
rpcSendResponse(&rpcRsp);
rpcFreeCont(pRead->rpcMsg.pCont);
+ vnodeRelease(pVnode);
+}
+
+void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) {
+ vnodeRelease(pVnode);
+ return;
}
static void *dnodeProcessReadQueue(void *param) {
@@ -219,9 +214,16 @@ static void *dnodeProcessReadQueue(void *param) {
break;
}
- dDebug("%p, msg:%s will be processed in vread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]);
+ dDebug("%p, msg:%s will be processed in vread queue, qtype:%d", pReadMsg->rpcMsg.ahandle,
+ taosMsg[pReadMsg->rpcMsg.msgType], type);
int32_t code = vnodeProcessRead(pVnode, pReadMsg);
- dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
+
+ if (type == TAOS_QTYPE_RPC) {
+ dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
+ } else {
+ dnodeDispatchNonRspMsg(pVnode, pReadMsg, code);
+ }
+
taosFreeQitem(pReadMsg);
}
diff --git a/src/inc/dnode.h b/src/inc/dnode.h
index b561c407a3..1d33dafbaa 100644
--- a/src/inc/dnode.h
+++ b/src/inc/dnode.h
@@ -53,6 +53,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode);
void dnodeFreeVnodeWqueue(void *queue);
void *dnodeAllocateVnodeRqueue(void *pVnode);
void dnodeFreeVnodeRqueue(void *rqueue);
+void dnodePutQhandleIntoReadQueue(void *pVnode, void *qhandle);
void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code);
int32_t dnodeAllocateMnodePqueue();
diff --git a/src/inc/query.h b/src/inc/query.h
index af3a89682c..88badc2d7b 100644
--- a/src/inc/query.h
+++ b/src/inc/query.h
@@ -44,7 +44,7 @@ void qDestroyQueryInfo(qinfo_t qinfo);
* @param qinfo
* @return
*/
-void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param);
+void qTableQuery(qinfo_t qinfo);
/**
* Retrieve the produced results information, if current query is not paused or completed,
@@ -84,6 +84,13 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
*/
int32_t qKillQuery(qinfo_t qinfo);
+void* qOpenQueryMgmt(int32_t vgId);
+void qSetQueryMgmtClosed(void* pExecutor);
+void qCleanupQueryMgmt(void* pExecutor);
+void** qRegisterQInfo(void* pMgmt, void* qInfo);
+void** qAcquireQInfo(void* pMgmt, void** key);
+void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 76ca99c9ad..e4ee058cef 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -365,6 +365,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TAOS_QTYPE_FWD 1
#define TAOS_QTYPE_WAL 2
#define TAOS_QTYPE_CQ 3
+#define TAOS_QTYPE_QUERY 4
typedef enum {
TSDB_SUPER_TABLE = 0, // super table
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index cb25242d27..6155f08e76 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -285,9 +285,9 @@ typedef struct {
int32_t tid;
int16_t tversion;
int16_t colId;
- int16_t type;
- int16_t bytes;
int32_t tagValLen;
+ int16_t numOfTags;
+ int32_t schemaLen;
char data[];
} SUpdateTableTagValMsg;
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 9f0c8cc241..19abe35db3 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -52,13 +52,16 @@ void vnodeRelease(void *pVnode);
void* vnodeAccquireVnode(int32_t vgId); // add refcount
void* vnodeGetVnode(int32_t vgId); // keep refcount unchanged
+void* vnodeAccquireRqueue(void *);
void* vnodeGetRqueue(void *);
void* vnodeGetWqueue(int32_t vgId);
void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
-void vnodeBuildStatusMsg(void * param);
+int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
+void vnodeBuildStatusMsg(void *param);
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes);
+void vnodeCleanupResources();
int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index 66e8cf7398..df3ce10001 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -4,3 +4,4 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
ADD_SUBDIRECTORY(taosdump)
+ADD_SUBDIRECTORY(taosmigrate)
\ No newline at end of file
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 3265285cca..9a5aedcdb7 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -32,6 +32,7 @@
#include
#include
#include
+#include
#include "taos.h"
#include "tutil.h"
@@ -54,6 +55,7 @@ static struct argp_option options[] = {
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
+ {0, 's', "sql file", 0, "The select sql file.", 3},
{0, 'M', 0, 0, "Use metric flag.", 13},
{0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14},
{0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6},
@@ -79,6 +81,7 @@ typedef struct DemoArguments {
char *password;
char *database;
char *tb_prefix;
+ char *sqlFile;
bool use_metric;
bool insert_only;
char *output_file;
@@ -120,6 +123,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'o':
arguments->output_file = arg;
break;
+ case 's':
+ arguments->sqlFile = arg;
+ break;
case 'q':
arguments->mode = atoi(arg);
break;
@@ -179,10 +185,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->tb_prefix = arg;
break;
case 'M':
- arguments->use_metric = true;
+ arguments->use_metric = false;
break;
case 'x':
- arguments->insert_only = true;
+ arguments->insert_only = false;
break;
case 'c':
if (wordexp(arg, &full_path, 0) != 0) {
@@ -253,6 +259,9 @@ typedef struct {
int data_of_rate;
int64_t start_time;
bool do_aggreFunc;
+
+ char* cols;
+ bool use_metric;
sem_t mutex_sem;
int notFinished;
@@ -305,6 +314,8 @@ void rand_string(char *str, int size);
double getCurrentTime();
void callBack(void *param, TAOS_RES *res, int code);
+void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass);
+void querySqlFile(TAOS* taos, char* sqlFile);
int main(int argc, char *argv[]) {
SDemoArguments arguments = { NULL, // host
@@ -313,6 +324,7 @@ int main(int argc, char *argv[]) {
"taosdata", // password
"test", // database
"t", // tb_prefix
+ NULL,
false, // use_metric
false, // insert_only
"./output.txt", // output_file
@@ -361,7 +373,7 @@ int main(int argc, char *argv[]) {
abort();
#endif
}
-
+
enum MODE query_mode = arguments.mode;
char *ip_addr = arguments.host;
uint16_t port = arguments.port;
@@ -385,6 +397,13 @@ int main(int argc, char *argv[]) {
char dataString[STRING_LEN];
bool do_aggreFunc = true;
+ if (NULL != arguments.sqlFile) {
+ TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port);
+ querySqlFile(qtaos, arguments.sqlFile);
+ taos_close(qtaos);
+ return 0;
+ }
+
memset(dataString, 0, STRING_LEN);
int len = 0;
@@ -495,47 +514,19 @@ int main(int argc, char *argv[]) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
}
- if (!use_metric) {
- /* Create all the tables; */
- printf("Creating %d table(s)......\n", ntables);
- for (int i = 0; i < ntables; i++) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols);
- queryDB(taos, command);
- }
-
- printf("Table(s) created!\n");
- taos_close(taos);
-
- } else {
+ if (use_metric) {
/* Create metric table */
printf("Creating meters super table...\n");
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command);
printf("meters created!\n");
- /* Create all the tables; */
- printf("Creating %d table(s)......\n", ntables);
- for (int i = 0; i < ntables; i++) {
- int j;
- if (i % 10 == 0) {
- j = 10;
- } else {
- j = i % 10;
- }
- if (j % 2 == 0) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "shanghai");
- } else {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "beijing");
- }
- queryDB(taos, command);
- }
-
- printf("Table(s) created!\n");
taos_close(taos);
}
- /* Wait for table to create */
-
+ /* Wait for table to create */
+ multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);
+
/* Insert data */
double ts = getCurrentTime();
printf("Inserting data......\n");
@@ -685,6 +676,198 @@ int main(int argc, char *argv[]) {
return 0;
}
+#define MAX_SQL_SIZE 65536
+void selectSql(TAOS* taos, char* sqlcmd)
+{
+ TAOS_RES *pSql = taos_query(taos, sqlcmd);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql));
+ taos_free_result(pSql);
+ exit(1);
+ }
+
+ int count = 0;
+ while (taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
+
+ taos_free_result(pSql);
+ return;
+}
+
+
+/* Function to do regular expression check */
+static int regexMatch(const char *s, const char *reg, int cflags) {
+ regex_t regex;
+ char msgbuf[100] = {0};
+
+ /* Compile regular expression */
+ if (regcomp(®ex, reg, cflags) != 0) {
+ printf("Fail to compile regex\n");
+ exit(-1);
+ }
+
+ /* Execute regular expression */
+ int reti = regexec(®ex, s, 0, NULL, 0);
+ if (!reti) {
+ regfree(®ex);
+ return 1;
+ } else if (reti == REG_NOMATCH) {
+ regfree(®ex);
+ return 0;
+ } else {
+ regerror(reti, ®ex, msgbuf, sizeof(msgbuf));
+ printf("Regex match failed: %s\n", msgbuf);
+ regfree(®ex);
+ exit(-1);
+ }
+
+ return 0;
+}
+
+static int isCommentLine(char *line) {
+ if (line == NULL) return 1;
+
+ return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
+}
+
+void querySqlFile(TAOS* taos, char* sqlFile)
+{
+ FILE *fp = fopen(sqlFile, "r");
+ if (fp == NULL) {
+ printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
+ exit(-1);
+ }
+
+ int read_len = 0;
+ char * cmd = calloc(1, MAX_SQL_SIZE);
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
+
+ double t = getCurrentTime();
+
+ while ((read_len = getline(&line, &line_len, fp)) != -1) {
+ if (read_len >= MAX_SQL_SIZE) continue;
+ line[--read_len] = '\0';
+
+ if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ continue;
+ }
+
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
+ }
+
+ memcpy(cmd + cmd_len, line, read_len);
+ selectSql(taos, cmd);
+ memset(cmd, 0, MAX_SQL_SIZE);
+ cmd_len = 0;
+ }
+
+ t = getCurrentTime() - t;
+ printf("run %s took %.6f second(s)\n\n", sqlFile, t);
+
+ free(cmd);
+ if (line) free(line);
+ fclose(fp);
+ return;
+}
+
+void * createTable(void *sarg)
+{
+ char command[BUFFER_SIZE] = "\0";
+
+ info *winfo = (info *)sarg;
+
+ if (!winfo->use_metric) {
+ /* Create all the tables; */
+ printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
+ for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
+ snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
+ queryDB(winfo->taos, command);
+ }
+
+ taos_close(winfo->taos);
+
+ } else {
+ /* Create all the tables; */
+ printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
+ for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
+ int j;
+ if (i % 10 == 0) {
+ j = 10;
+ } else {
+ j = i % 10;
+ }
+ if (j % 2 == 0) {
+ snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai");
+ } else {
+ snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing");
+ }
+ queryDB(winfo->taos, command);
+ }
+ taos_close(winfo->taos);
+ }
+
+ return NULL;
+}
+
+void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) {
+ double ts = getCurrentTime();
+ printf("create table......\n");
+ pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ info *infos = malloc(threads * sizeof(info));
+
+ int a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int b = 0;
+ if (threads != 0)
+ b = ntables % threads;
+ int last = 0;
+ for (int i = 0; i < threads; i++) {
+ info *t_info = infos + i;
+ t_info->threadID = i;
+ tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
+ tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
+ t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
+ t_info->start_table_id = last;
+ t_info->end_table_id = i < b ? last + a : last + a - 1;
+ last = t_info->end_table_id + 1;
+ t_info->use_metric = use_metric;
+ t_info->cols = cols;
+ pthread_create(pids + i, NULL, createTable, t_info);
+ }
+
+ for (int i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
+ }
+
+ double t = getCurrentTime() - ts;
+ printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads);
+
+ for (int i = 0; i < threads; i++) {
+ info *t_info = infos + i;
+ taos_close(t_info->taos);
+ sem_destroy(&(t_info->mutex_sem));
+ sem_destroy(&(t_info->lock_sem));
+ }
+
+ free(pids);
+ free(infos);
+
+ return ;
+}
+
void *readTable(void *sarg) {
info *rinfo = (info *)sarg;
TAOS *taos = rinfo->taos;
diff --git a/src/kit/taosmigrate/CMakeLists.txt b/src/kit/taosmigrate/CMakeLists.txt
new file mode 100644
index 0000000000..85b2f33f01
--- /dev/null
+++ b/src/kit/taosmigrate/CMakeLists.txt
@@ -0,0 +1,18 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+PROJECT(TDengine)
+
+IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
+ INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/vnode/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
+ AUX_SOURCE_DIRECTORY(. SRC)
+
+ ADD_EXECUTABLE(taosmigrate ${SRC})
+ TARGET_LINK_LIBRARIES(taosmigrate common tutil cJson)
+ENDIF ()
+
+SET_SOURCE_FILES_PROPERTIES(./taosmigrate.c PROPERTIES COMPILE_FLAGS -w)
diff --git a/src/kit/taosmigrate/taosmigrate.c b/src/kit/taosmigrate/taosmigrate.c
new file mode 100644
index 0000000000..b7bf6fc1ba
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrate.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+
+/* The options we understand. */
+static struct argp_option options[] = {
+ {0, 'r', "data dir", 0, "data dir", 0},
+ {0, 'd', "dnodeId", 0, "dnode id", 1},
+ {0, 'p', "port", 0, "dnode port", 1},
+ {0, 'f', "fqdn", 0, "dnode fqdn", 1},
+ {0, 'g', "multi dnodes", 0, "multi dnode info, e.g. \"2 7030 fqdn1, 3 8030 fqdn2\"", 2},
+ {0}};
+
+/* Used by main to communicate with parse_opt. */
+struct arguments {
+ char* dataDir;
+ int32_t dnodeId;
+ uint16_t port;
+ char* fqdn;
+ char* dnodeGroups;
+ char** arg_list;
+ int arg_list_len;
+};
+
+/* Parse a single option. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state) {
+ struct arguments *arguments = state->input;
+ switch (key) {
+    case 'r':
+ arguments->dataDir = arg;
+ break;
+ case 'd':
+ arguments->dnodeId = atoi(arg);
+ break;
+ case 'p':
+ arguments->port = atoi(arg);
+ break;
+ case 'f':
+      arguments->fqdn = arg; break;
+ case 'g':
+ arguments->dnodeGroups = arg;
+ break;
+ case ARGP_KEY_ARG:
+ arguments->arg_list = &state->argv[state->next - 1];
+ arguments->arg_list_len = state->argc - state->next + 1;
+ state->next = state->argc;
+
+ argp_usage(state);
+ break;
+
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+static struct argp argp = {options, parse_opt, 0, 0};
+struct arguments arguments = {NULL, 0, 0, NULL, NULL, NULL, 0};
+SdnodeGroup tsDnodeGroup = {0};
+
+int tSystemShell(const char * cmd)
+{
+ FILE * fp;
+ int res;
+ char buf[1024];
+ if (cmd == NULL) {
+ printf("tSystem cmd is NULL!\n");
+ return -1;
+ }
+
+ if ((fp = popen(cmd, "r") ) == NULL) {
+    printf("popen cmd:%s error: %s\n", cmd, strerror(errno));
+ return -1;
+ } else {
+ while(fgets(buf, sizeof(buf), fp)) {
+ printf("popen result:%s", buf);
+ }
+
+ if ((res = pclose(fp)) == -1) {
+ printf("close popen file pointer fp error!\n");
+ } else {
+ printf("popen res is :%d\n", res);
+ }
+
+ return res;
+ }
+}
+
+void taosMvFile(char* destFile, char *srcFile) {
+ char shellCmd[1024+1] = {0};
+
+ //(void)snprintf(shellCmd, 1024, "cp -rf %s %s", srcDir, destDir);
+ (void)snprintf(shellCmd, 1024, "mv -f %s %s", srcFile, destFile);
+ tSystemShell(shellCmd);
+}
+
+SdnodeIfo* getDnodeInfo(int32_t dnodeId)
+{
+ for (int32_t i = 0; i < tsDnodeGroup.dnodeNum; i++) {
+ if (dnodeId == tsDnodeGroup.dnodeArray[i].dnodeId) {
+ return &(tsDnodeGroup.dnodeArray[i]);
+ }
+ }
+
+ return NULL;
+}
+
+void parseOneDnodeInfo(char* buf, SdnodeIfo* pDnodeInfo)
+{
+ char *ptr;
+ char *p;
+ int32_t i = 0;
+ ptr = strtok_r(buf, " ", &p);
+ while(ptr != NULL) {
+ if (0 == i) {
+ pDnodeInfo->dnodeId = atoi(ptr);
+ } else if (1 == i) {
+ pDnodeInfo->port = atoi(ptr);
+ } else if (2 == i) {
+ tstrncpy(pDnodeInfo->fqdn, ptr, TSDB_FQDN_LEN);
+ } else {
+ printf("input parameter error near:%s\n", buf);
+ exit(-1);
+ }
+ i++;
+ ptr = strtok_r(NULL, " ", &p);
+ }
+
+ snprintf(pDnodeInfo->ep, TSDB_EP_LEN, "%s:%d", pDnodeInfo->fqdn, pDnodeInfo->port);
+}
+
+void saveDnodeGroups()
+{
+ if ((NULL != arguments.fqdn) && (arguments.dnodeId > 0) && (0 != arguments.port)) {
+ //printf("dnodeId:%d port:%d fqdn:%s ep:%s\n", arguments.dnodeId, arguments.port, arguments.fqdn, arguments.ep);
+
+ tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].dnodeId = arguments.dnodeId;
+ tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].port = arguments.port;
+ tstrncpy(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].fqdn, arguments.fqdn, TSDB_FQDN_LEN);
+ snprintf(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].ep, TSDB_EP_LEN, "%s:%d", tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].fqdn, tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].port);
+
+ tsDnodeGroup.dnodeNum++;
+ }
+
+ if (NULL == arguments.dnodeGroups) {
+ return;
+ }
+
+ //printf("dnodeGroups:%s\n", arguments.dnodeGroups);
+
+ char buf[1024];
+ char* str = NULL;
+ char* start = arguments.dnodeGroups;
+ while (NULL != (str = strstr(start, ","))) {
+    memcpy(buf, start, str - start); buf[str - start] = '\0';
+ // parse one dnode info: dnodeId port fqdn ep
+ parseOneDnodeInfo(buf, &(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum]));
+ tsDnodeGroup.dnodeNum++;
+ // next
+ start = str + 1;
+ str = NULL;
+ }
+
+ if (strlen(start)) {
+ parseOneDnodeInfo(start, &(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum]));
+ tsDnodeGroup.dnodeNum++;
+ }
+}
+
+int32_t main(int32_t argc, char *argv[]) {
+ memset(&tsDnodeGroup, 0, sizeof(SdnodeGroup));
+
+ argp_parse(&argp, argc, argv, 0, 0, &arguments);
+
+ if ((NULL == arguments.dataDir) || ((NULL == arguments.dnodeGroups)
+ && (NULL == arguments.fqdn || arguments.dnodeId < 1 || 0 == arguments.port))) {
+ printf("input parameter error!\n");
+ return -1;
+ }
+
+ saveDnodeGroups();
+
+ printf("===================arguments:==================\n");
+ printf("oldWal:%s\n", arguments.dataDir);
+ for (int32_t i = 0; i < tsDnodeGroup.dnodeNum; i++) {
+ printf("dnodeId:%d port:%d fqdn:%s ep:%s\n", tsDnodeGroup.dnodeArray[i].dnodeId,
+ tsDnodeGroup.dnodeArray[i].port,
+ tsDnodeGroup.dnodeArray[i].fqdn,
+ tsDnodeGroup.dnodeArray[i].ep);
+ }
+ printf("===========================\n");
+
+ // 1. modify wal for mnode
+ char mnodeWal[TSDB_FILENAME_LEN*2] = {0};
+ (void)snprintf(mnodeWal, TSDB_FILENAME_LEN*2, "%s/mnode/wal/wal0", arguments.dataDir);
+ walModWalFile(mnodeWal);
+
+ // 2. modfiy dnode config: mnodeIpList.json
+ char dnodeIpList[TSDB_FILENAME_LEN*2] = {0};
+ (void)snprintf(dnodeIpList, TSDB_FILENAME_LEN*2, "%s/dnode/mnodeIpList.json", arguments.dataDir);
+ modDnodeIpList(dnodeIpList);
+
+ // 3. modify vnode config: config.json
+ char vnodeDir[TSDB_FILENAME_LEN*2] = {0};
+ (void)snprintf(vnodeDir, TSDB_FILENAME_LEN*2, "%s/vnode", arguments.dataDir);
+ modAllVnode(vnodeDir);
+
+ return 0;
+}
+
diff --git a/src/kit/taosmigrate/taosmigrate.h b/src/kit/taosmigrate/taosmigrate.h
new file mode 100644
index 0000000000..a0a02e651c
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrate.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TAOS_MIGRATE_H
+#define TAOS_MIGRATE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _GNU_SOURCE
+
+#ifndef _ALPINE
+#include <error.h>
+#endif
+
+#include <argp.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+
+#include "taosdef.h"
+#include "tutil.h"
+#include "twal.h"
+#include "tchecksum.h"
+#include "mnodeDef.h"
+#include "mnodeSdb.h"
+#include "cJSON.h"
+#include "taosmsg.h"
+#include "tglobal.h"
+#include "tsdb.h"
+
+//#include "vnode.h"
+#include "vnodeInt.h"
+
+#define MAX_DNODE_NUM 128
+
+
+typedef struct _SdnodeIfo {
+ int32_t dnodeId;
+ uint16_t port;
+ char fqdn[TSDB_FQDN_LEN+1];
+ char ep[TSDB_EP_LEN+1];
+} SdnodeIfo;
+
+typedef struct _SdnodeGroup {
+ int32_t dnodeNum;
+ SdnodeIfo dnodeArray[MAX_DNODE_NUM];
+} SdnodeGroup;
+
+int tSystemShell(const char * cmd);
+void taosMvFile(char* destFile, char *srcFile) ;
+void walModWalFile(char* walfile);
+SdnodeIfo* getDnodeInfo(int32_t dnodeId);
+void modDnodeIpList(char* dnodeIpList);
+void modAllVnode(char *vnodeDir);
+
+#endif
diff --git a/src/kit/taosmigrate/taosmigrateDnodeCfg.c b/src/kit/taosmigrate/taosmigrateDnodeCfg.c
new file mode 100644
index 0000000000..263d5521e9
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrateDnodeCfg.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+//#include "dnodeInt.h"
+//#include "dnodeMgmt.h"
+//#include "dnodeVRead.h"
+//#include "dnodeVWrite.h"
+//#include "dnodeModule.h"
+
+static SDMMnodeInfos tsDnodeIpInfos = {0};
+
+static bool dnodeReadMnodeInfos(char* dnodeIpList) {
+ FILE *fp = fopen(dnodeIpList, "r");
+ if (!fp) {
+ printf("failed to read mnodeIpList.json, file not exist\n");
+ return false;
+ }
+
+ bool ret = false;
+ int maxLen = 2000;
+ char *content = calloc(1, maxLen + 1);
+ int len = fread(content, 1, maxLen, fp);
+ if (len <= 0) {
+ free(content);
+ fclose(fp);
+ printf("failed to read mnodeIpList.json, content is null\n");
+ return false;
+ }
+
+ content[len] = 0;
+ cJSON* root = cJSON_Parse(content);
+ if (root == NULL) {
+ printf("failed to read mnodeIpList.json, invalid json format\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* inUse = cJSON_GetObjectItem(root, "inUse");
+ if (!inUse || inUse->type != cJSON_Number) {
+ printf("failed to read mnodeIpList.json, inUse not found\n");
+ goto PARSE_OVER;
+ }
+ tsDnodeIpInfos.inUse = inUse->valueint;
+
+ cJSON* nodeNum = cJSON_GetObjectItem(root, "nodeNum");
+ if (!nodeNum || nodeNum->type != cJSON_Number) {
+ printf("failed to read mnodeIpList.json, nodeNum not found\n");
+ goto PARSE_OVER;
+ }
+ tsDnodeIpInfos.nodeNum = nodeNum->valueint;
+
+ cJSON* nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+ if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+ printf("failed to read mnodeIpList.json, nodeInfos not found\n");
+ goto PARSE_OVER;
+ }
+
+ int size = cJSON_GetArraySize(nodeInfos);
+ if (size != tsDnodeIpInfos.nodeNum) {
+ printf("failed to read mnodeIpList.json, nodeInfos size not matched\n");
+ goto PARSE_OVER;
+ }
+
+ for (int i = 0; i < size; ++i) {
+ cJSON* nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+ if (nodeInfo == NULL) continue;
+
+ cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+ if (!nodeId || nodeId->type != cJSON_Number) {
+ printf("failed to read mnodeIpList.json, nodeId not found\n");
+ goto PARSE_OVER;
+ }
+ tsDnodeIpInfos.nodeInfos[i].nodeId = nodeId->valueint;
+
+ cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
+ if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
+ printf("failed to read mnodeIpList.json, nodeName not found\n");
+ goto PARSE_OVER;
+ }
+    tstrncpy(tsDnodeIpInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
+
+ SdnodeIfo* pDnodeInfo = getDnodeInfo(tsDnodeIpInfos.nodeInfos[i].nodeId);
+ if (NULL == pDnodeInfo) {
+ continue;
+ }
+
+ tstrncpy(tsDnodeIpInfos.nodeInfos[i].nodeEp, pDnodeInfo->ep, TSDB_EP_LEN);
+ }
+
+ ret = true;
+
+ //printf("read mnode iplist successed, numOfIps:%d inUse:%d\n", tsDnodeIpInfos.nodeNum, tsDnodeIpInfos.inUse);
+ //for (int32_t i = 0; i < tsDnodeIpInfos.nodeNum; i++) {
+ // printf("mnode:%d, %s\n", tsDnodeIpInfos.nodeInfos[i].nodeId, tsDnodeIpInfos.nodeInfos[i].nodeEp);
+ //}
+
+PARSE_OVER:
+ free(content);
+ cJSON_Delete(root);
+ fclose(fp);
+ return ret;
+}
+
+
+static void dnodeSaveMnodeInfos(char* dnodeIpList) {
+ FILE *fp = fopen(dnodeIpList, "w");
+ if (!fp) return;
+
+ int32_t len = 0;
+ int32_t maxLen = 2000;
+ char * content = calloc(1, maxLen + 1);
+
+ len += snprintf(content + len, maxLen - len, "{\n");
+ len += snprintf(content + len, maxLen - len, " \"inUse\": %d,\n", tsDnodeIpInfos.inUse);
+ len += snprintf(content + len, maxLen - len, " \"nodeNum\": %d,\n", tsDnodeIpInfos.nodeNum);
+ len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
+ for (int32_t i = 0; i < tsDnodeIpInfos.nodeNum; i++) {
+ len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", tsDnodeIpInfos.nodeInfos[i].nodeId);
+ len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", tsDnodeIpInfos.nodeInfos[i].nodeEp);
+ if (i < tsDnodeIpInfos.nodeNum -1) {
+ len += snprintf(content + len, maxLen - len, " },{\n");
+ } else {
+ len += snprintf(content + len, maxLen - len, " }]\n");
+ }
+ }
+ len += snprintf(content + len, maxLen - len, "}\n");
+
+ fwrite(content, 1, len, fp);
+ fflush(fp);
+ fclose(fp);
+ free(content);
+
+ printf("mod mnode iplist successed\n");
+}
+
+void modDnodeIpList(char* dnodeIpList)
+{
+ (void)dnodeReadMnodeInfos(dnodeIpList);
+ dnodeSaveMnodeInfos(dnodeIpList);
+ return;
+}
+
+
diff --git a/src/kit/taosmigrate/taosmigrateMnodeWal.c b/src/kit/taosmigrate/taosmigrateMnodeWal.c
new file mode 100644
index 0000000000..6315ff99f7
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrateMnodeWal.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+static void recordWrite(int fd, SWalHead *pHead) {
+
+ taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead));
+
+ int contLen = pHead->len + sizeof(SWalHead);
+
+ if(write(fd, pHead, contLen) != contLen) {
+ printf("failed to write(%s)", strerror(errno));
+ exit(-1);
+ }
+}
+
+static void recordMod(SWalHead* pHead)
+{
+ SDnodeObj *pDnode;
+
+ ESdbTable tableId = (ESdbTable)(pHead->msgType / 10);
+
+ switch (tableId) {
+ case SDB_TABLE_DNODE:
+ case SDB_TABLE_MNODE:
+ pDnode = (SDnodeObj *)pHead->cont;
+
+ printf("dnodeId:%d port:%d fqdn:%s ep:%s\n", pDnode->dnodeId, pDnode->dnodePort, pDnode->dnodeFqdn, pDnode->dnodeEp);
+
+ SdnodeIfo* pDnodeInfo = getDnodeInfo(pDnode->dnodeId);
+ if (NULL == pDnodeInfo) {
+ break;
+ }
+
+ pDnode->dnodePort = pDnodeInfo->port;
+ tstrncpy(pDnode->dnodeFqdn, pDnodeInfo->fqdn, sizeof(pDnode->dnodeFqdn));
+ tstrncpy(pDnode->dnodeEp, pDnodeInfo->ep, sizeof(pDnode->dnodeEp));
+ break;
+ #if 0
+ case SDB_TABLE_ACCOUNT:
+ SAcctObj *pAcct = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_USER:
+ SUserObj *pUser = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_DB:
+ SDbObj *pDb = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_VGROUP:
+ SVgObj *pVgroup = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_STABLE:
+ SSuperTableObj *pStable = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_CTABLE:
+ SChildTableObj *pCTable = (SDnodeObj *)pHead->cont;
+ break;
+ #endif
+ default:
+ break;
+ }
+}
+
+void walModWalFile(char* walfile) {
+ char *buffer = malloc(1024000); // size for one record
+ if (buffer == NULL) {
+ printf("failed to malloc:%s\n", strerror(errno));
+ return ;
+ }
+
+ SWalHead *pHead = (SWalHead *)buffer;
+
+ int rfd = open(walfile, O_RDONLY);
+ if (rfd < 0) {
+ printf("failed to open %s failed:%s\n", walfile, strerror(errno));
+ free(buffer);
+ return ;
+ }
+
+ char newWalFile[32] = "wal0";
+  int wfd = open(newWalFile, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
+
+  if (wfd < 0) {
+    printf("wal:%s, failed to open(%s)\n", newWalFile, strerror(errno));
+    close(rfd); free(buffer);
+ return ;
+ }
+
+ printf("start to mod %s into %s\n", walfile, newWalFile);
+
+ while (1) {
+ memset(buffer, 0, 1024000);
+ int ret = read(rfd, pHead, sizeof(SWalHead));
+ if ( ret == 0) break;
+
+ if (ret != sizeof(SWalHead)) {
+ printf("wal:%s, failed to read head, skip, ret:%d(%s)\n", walfile, ret, strerror(errno));
+ break;
+ }
+
+ if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) {
+ printf("wal:%s, cksum is messed up, skip the rest of file\n", walfile);
+ break;
+ }
+
+ ret = read(rfd, pHead->cont, pHead->len);
+ if ( ret != pHead->len) {
+ printf("wal:%s, failed to read body, skip, len:%d ret:%d\n", walfile, pHead->len, ret);
+ break;
+ }
+
+ recordMod(pHead);
+ recordWrite(wfd, pHead);
+ }
+
+ close(rfd);
+ close(wfd);
+ free(buffer);
+
+ taosMvFile(walfile, newWalFile);
+
+ return ;
+}
+
+
+
diff --git a/src/kit/taosmigrate/taosmigrateVnodeCfg.c b/src/kit/taosmigrate/taosmigrateVnodeCfg.c
new file mode 100644
index 0000000000..1cb2fee353
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrateVnodeCfg.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+
+static int32_t saveVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
+{
+ FILE *fp = fopen(cfgFile, "w");
+ if (!fp) {
+ printf("failed to open vnode cfg file for write, file:%s error:%s\n", cfgFile, strerror(errno));
+ return errno;
+ }
+
+ int32_t len = 0;
+ int32_t maxLen = 1000;
+ char * content = calloc(1, maxLen + 1);
+ if (content == NULL) {
+ fclose(fp);
+ return -1;
+ }
+
+ len += snprintf(content + len, maxLen - len, "{\n");
+ len += snprintf(content + len, maxLen - len, " \"db\": \"%s\",\n", pVnode->db);
+ len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnode->cfgVersion);
+ len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnode->tsdbCfg.cacheBlockSize);
+ len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnode->tsdbCfg.totalBlocks);
+ len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnode->tsdbCfg.maxTables);
+ len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnode->tsdbCfg.daysPerFile);
+ len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnode->tsdbCfg.keep);
+ len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnode->tsdbCfg.keep1);
+ len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnode->tsdbCfg.keep2);
+ len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.minRowsPerFileBlock);
+ len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.maxRowsPerFileBlock);
+ len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnode->tsdbCfg.commitTime);
+ len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnode->tsdbCfg.precision);
+ len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnode->tsdbCfg.compression);
+ len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnode->walCfg.walLevel);
+ len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pVnode->syncCfg.replica);
+ len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pVnode->walCfg.wals);
+ len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnode->syncCfg.quorum);
+
+ len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
+ for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
+ len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnode->syncCfg.nodeInfo[i].nodeId);
+ len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s:%d\"\n", pVnode->syncCfg.nodeInfo[i].nodeFqdn, pVnode->syncCfg.nodeInfo[i].nodePort);
+
+ if (i < pVnode->syncCfg.replica - 1) {
+ len += snprintf(content + len, maxLen - len, " },{\n");
+ } else {
+ len += snprintf(content + len, maxLen - len, " }]\n");
+ }
+ }
+ len += snprintf(content + len, maxLen - len, "}\n");
+
+ fwrite(content, 1, len, fp);
+ fflush(fp);
+ fclose(fp);
+ free(content);
+
+ printf("mod vnode cfg %s successed\n", cfgFile);
+
+ return 0;
+}
+
+static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
+{
+ cJSON *root = NULL;
+ char *content = NULL;
+ int maxLen = 1000;
+ int32_t ret = -1;
+
+ FILE *fp = fopen(cfgFile, "r");
+ if (!fp) {
+ printf("failed to open vnode cfg file:%s to read, error:%s\n", cfgFile, strerror(errno));
+ goto PARSE_OVER;
+ }
+
+ content = calloc(1, maxLen + 1);
+ if (content == NULL) {
+ goto PARSE_OVER;
+ }
+
+ int len = fread(content, 1, maxLen, fp);
+ if (len <= 0) {
+ printf("failed to read vnode cfg, content is null, error:%s\n", strerror(errno));
+ goto PARSE_OVER;
+ }
+
+ root = cJSON_Parse(content);
+ if (root == NULL) {
+ printf("failed to json parse %s, invalid json format\n", cfgFile);
+ goto PARSE_OVER;
+ }
+
+ cJSON *db = cJSON_GetObjectItem(root, "db");
+ if (!db || db->type != cJSON_String || db->valuestring == NULL) {
+ printf("vgId:%d, failed to read vnode cfg, db not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+  tstrncpy(pVnode->db, db->valuestring, sizeof(pVnode->db));
+
+ cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion");
+ if (!cfgVersion || cfgVersion->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, cfgVersion not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->cfgVersion = cfgVersion->valueint;
+
+ cJSON *cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize");
+ if (!cacheBlockSize || cacheBlockSize->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, cacheBlockSize not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.cacheBlockSize = cacheBlockSize->valueint;
+
+ cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks");
+ if (!totalBlocks || totalBlocks->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, totalBlocks not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
+
+ cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
+ if (!maxTables || maxTables->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.maxTables = maxTables->valueint;
+
+ cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
+ if (!daysPerFile || daysPerFile->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysPerFile not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint;
+
+ cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep");
+ if (!daysToKeep || daysToKeep->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysToKeep not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.keep = daysToKeep->valueint;
+
+ cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1");
+ if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysToKeep1 not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.keep1 = daysToKeep1->valueint;
+
+ cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2");
+ if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysToKeep2 not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.keep2 = daysToKeep2->valueint;
+
+ cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock");
+ if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint;
+
+ cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock");
+ if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
+
+ cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
+ if (!commitTime || commitTime->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
+
+ cJSON *precision = cJSON_GetObjectItem(root, "precision");
+ if (!precision || precision->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, precision not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.precision = (int8_t)precision->valueint;
+
+ cJSON *compression = cJSON_GetObjectItem(root, "compression");
+ if (!compression || compression->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, compression not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.compression = (int8_t)compression->valueint;
+
+ cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel");
+ if (!walLevel || walLevel->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, walLevel not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->walCfg.walLevel = (int8_t) walLevel->valueint;
+
+ cJSON *wals = cJSON_GetObjectItem(root, "wals");
+ if (!wals || wals->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, wals not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->walCfg.wals = (int8_t)wals->valueint;
+ pVnode->walCfg.keep = 0;
+
+ cJSON *replica = cJSON_GetObjectItem(root, "replica");
+ if (!replica || replica->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, replica not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->syncCfg.replica = (int8_t)replica->valueint;
+
+ cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
+ if (!quorum || quorum->type != cJSON_Number) {
+ printf("vgId: %d, failed to read vnode cfg, quorum not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->syncCfg.quorum = (int8_t)quorum->valueint;
+
+ cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+ if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+ printf("vgId:%d, failed to read vnode cfg, nodeInfos not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+
+ int size = cJSON_GetArraySize(nodeInfos);
+ if (size != pVnode->syncCfg.replica) {
+ printf("vgId:%d, failed to read vnode cfg, nodeInfos size not matched\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+
+ for (int i = 0; i < size; ++i) {
+ cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+ if (nodeInfo == NULL) continue;
+
+ cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+ if (!nodeId || nodeId->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, nodeId not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint;
+
+ cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
+ if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
+ printf("vgId:%d, failed to read vnode cfg, nodeFqdn not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+
+ taosGetFqdnPortFromEp(nodeEp->valuestring, pVnode->syncCfg.nodeInfo[i].nodeFqdn, &pVnode->syncCfg.nodeInfo[i].nodePort);
+ //pVnode->syncCfg.nodeInfo[i].nodePort += TSDB_PORT_SYNC;
+
+
+ SdnodeIfo* pDnodeInfo = getDnodeInfo(pVnode->syncCfg.nodeInfo[i].nodeId);
+ if (NULL == pDnodeInfo) {
+ continue;
+ }
+
+ pVnode->syncCfg.nodeInfo[i].nodePort = pDnodeInfo->port;
+ tstrncpy(pVnode->syncCfg.nodeInfo[i].nodeFqdn, pDnodeInfo->fqdn, TSDB_FQDN_LEN);
+ }
+
+ ret = 0;
+ //printf("read vnode cfg successfully, replcia:%d\n", pVnode->syncCfg.replica);
+ //for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
+ // printf("dnode:%d, %s:%d\n", pVnode->syncCfg.nodeInfo[i].nodeId, pVnode->syncCfg.nodeInfo[i].nodeFqdn, pVnode->syncCfg.nodeInfo[i].nodePort);
+ //}
+
+PARSE_OVER:
+ tfree(content);
+ cJSON_Delete(root);
+ if (fp) fclose(fp);
+ return ret;
+}
+
+static void modVnodeCfg(char* vnodeCfg)
+{
+ int32_t ret;
+ SVnodeObj vnodeObj = {0};
+ ret = readVnodeCfg(&vnodeObj, vnodeCfg);
+ if (0 != ret) {
+ printf("read vnode cfg %s fail!\n", vnodeCfg);
+ return ;
+ }
+
+ (void)saveVnodeCfg(&vnodeObj, vnodeCfg);
+
+ return ;
+}
+
+void modAllVnode(char *vnodeDir)
+{
+ DIR *dir = opendir(vnodeDir);
+ if (dir == NULL) return;
+
+ char filename[1024];
+ struct dirent *de = NULL;
+ while ((de = readdir(dir)) != NULL) {
+ if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
+
+ if ((de->d_type & DT_DIR) && (strncmp(de->d_name, "vnode", 5) == 0)) {
+ memset(filename, 0, 1024);
+ snprintf(filename, 1023, "%s/%s/config.json", vnodeDir, de->d_name);
+ modVnodeCfg(filename);
+ }
+ }
+
+ closedir(dir);
+}
+
diff --git a/src/mnode/inc/mnodeProfile.h b/src/mnode/inc/mnodeProfile.h
index c9f7cc8e2a..e39496ec9c 100644
--- a/src/mnode/inc/mnodeProfile.h
+++ b/src/mnode/inc/mnodeProfile.h
@@ -41,7 +41,7 @@ int32_t mnodeInitProfile();
void mnodeCleanupProfile();
SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port);
-SConnObj *mnodeAccquireConn(uint32_t connId, char *user, uint32_t ip, uint16_t port);
+SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port);
void mnodeReleaseConn(SConnObj *pConn);
int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SCMHeartBeatMsg *pHBMsg);
diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c
index a1d4be93c6..af4a09a45a 100644
--- a/src/mnode/src/mnodeProfile.c
+++ b/src/mnode/src/mnodeProfile.c
@@ -43,7 +43,7 @@
extern void *tsMnodeTmr;
static SCacheObj *tsMnodeConnCache = NULL;
-static uint32_t tsConnIndex = 0;
+static int32_t tsConnIndex = 0;
static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, void *pConn);
@@ -68,7 +68,7 @@ int32_t mnodeInitProfile() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_STREAM, mnodeProcessKillStreamMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_CONN, mnodeProcessKillConnectionMsg);
- tsMnodeConnCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BINARY, CONN_CHECK_TIME,false, mnodeFreeConn);
+ tsMnodeConnCache = taosCacheInit(TSDB_DATA_TYPE_INT, CONN_CHECK_TIME, false, mnodeFreeConn, "conn");
return 0;
}
@@ -89,7 +89,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
return NULL;
}
- uint32_t connId = atomic_add_fetch_32(&tsConnIndex, 1);
+ int32_t connId = atomic_add_fetch_32(&tsConnIndex, 1);
if (connId == 0) atomic_add_fetch_32(&tsConnIndex, 1);
SConnObj connObj = {
@@ -100,9 +100,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
};
tstrncpy(connObj.user, user, sizeof(connObj.user));
- char key[10];
- int32_t len = sprintf(key, "%u", connId);
- SConnObj *pConn = taosCachePut(tsMnodeConnCache, key, len, &connObj, sizeof(connObj), CONN_KEEP_TIME);
+ SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME);
mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
return pConn;
@@ -113,12 +111,9 @@ void mnodeReleaseConn(SConnObj *pConn) {
taosCacheRelease(tsMnodeConnCache, (void **)&pConn, false);
}
-SConnObj *mnodeAccquireConn(uint32_t connId, char *user, uint32_t ip, uint16_t port) {
- char key[10];
- int32_t len = sprintf(key, "%u", connId);
+SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port) {
uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs();
-
- SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, key, len, expireTime);
+ SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, &connId, sizeof(int32_t), expireTime);
if (pConn == NULL) {
mError("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
return NULL;
@@ -547,7 +542,8 @@ static int32_t mnodeProcessKillQueryMsg(SMnodeMsg *pMsg) {
int32_t queryId = (int32_t)strtol(queryIdStr, NULL, 10);
- SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, connIdStr, strlen(connIdStr));
+ int32_t connId = atoi(connIdStr);
+ SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t));
if (pConn == NULL) {
mError("connId:%s, failed to kill queryId:%d, conn not exist", connIdStr, queryId);
return TSDB_CODE_MND_INVALID_CONN_ID;
@@ -576,8 +572,9 @@ static int32_t mnodeProcessKillStreamMsg(SMnodeMsg *pMsg) {
}
int32_t streamId = (int32_t)strtol(streamIdStr, NULL, 10);
+ int32_t connId = atoi(connIdStr);
- SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, connIdStr, strlen(connIdStr));
+ SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t));
if (pConn == NULL) {
mError("connId:%s, failed to kill streamId:%d, conn not exist", connIdStr, streamId);
return TSDB_CODE_MND_INVALID_CONN_ID;
@@ -594,7 +591,8 @@ static int32_t mnodeProcessKillConnectionMsg(SMnodeMsg *pMsg) {
if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS;
SCMKillConnMsg *pKill = pMsg->rpcMsg.pCont;
- SConnObj * pConn = taosCacheAcquireByKey(tsMnodeConnCache, pKill->queryId, strlen(pKill->queryId));
+ int32_t connId = atoi(pKill->queryId);
+ SConnObj * pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t));
if (pConn == NULL) {
mError("connId:%s, failed to kill, conn not exist", pKill->queryId);
return TSDB_CODE_MND_INVALID_CONN_ID;
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 12b434a513..97ffe83914 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -65,7 +65,7 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
- tsMnodeShowCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BINARY, 5, false, mnodeFreeShowObj);
+ tsMnodeShowCache = taosCacheInit(TSDB_DATA_TYPE_INT, 5, false, mnodeFreeShowObj, "show");
return 0;
}
@@ -364,10 +364,7 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) {
}
static bool mnodeAccquireShowObj(SShowObj *pShow) {
- char key[10];
- int32_t len = sprintf(key, "%d", pShow->index);
-
- SShowObj *pSaved = taosCacheAcquireByKey(tsMnodeShowCache, key, len);
+ SShowObj *pSaved = taosCacheAcquireByKey(tsMnodeShowCache, &pShow->index, sizeof(int32_t));
if (pSaved == pShow) {
mDebug("%p, show is accquired from cache", pShow);
return true;
@@ -378,14 +375,11 @@ static bool mnodeAccquireShowObj(SShowObj *pShow) {
static void *mnodePutShowObj(SShowObj *pShow, int32_t size) {
if (tsMnodeShowCache != NULL) {
- char key[10];
pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1);
- int32_t len = sprintf(key, "%d", pShow->index);
-
- SShowObj *newQhandle = taosCachePut(tsMnodeShowCache, key, len, pShow, size, 6);
+ SShowObj *newQhandle = taosCachePut(tsMnodeShowCache, &pShow->index, sizeof(int32_t), pShow, size, 6);
+ mDebug("%p, show is put into cache, index:%d", newQhandle, pShow->index);
free(pShow);
- mDebug("%p, show is put into cache, index:%s", newQhandle, key);
return newQhandle;
}
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index 46e7fd45fa..cdaee53c38 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -53,12 +53,12 @@ static void httpDestroyContext(void *data) {
httpFreeJsonBuf(pContext);
httpFreeMultiCmds(pContext);
- httpDebug("context:%p, is destroyed, refCount:%d", pContext, pContext->refCount);
+ httpDebug("context:%p, is destroyed, refCount:%d data:%p", pContext, pContext->refCount, data);
tfree(pContext);
}
bool httpInitContexts() {
- tsHttpServer.contextCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BINARY, 2, false, httpDestroyContext);
+ tsHttpServer.contextCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 2, false, httpDestroyContext, "restc");
if (tsHttpServer.contextCache == NULL) {
httpError("failed to init context cache");
return false;
@@ -103,17 +103,14 @@ HttpContext *httpCreateContext(int32_t fd) {
HttpContext *pContext = calloc(1, sizeof(HttpContext));
if (pContext == NULL) return NULL;
- char contextStr[16] = {0};
- int32_t keySize = snprintf(contextStr, sizeof(contextStr), "%p", pContext);
-
pContext->fd = fd;
pContext->httpVersion = HTTP_VERSION_10;
pContext->lastAccessTime = taosGetTimestampSec();
pContext->state = HTTP_CONTEXT_STATE_READY;
-
- HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, contextStr, keySize, &pContext, sizeof(HttpContext *), 3);
+
+ HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(void *), &pContext, sizeof(void *), 3);
pContext->ppContext = ppContext;
- httpDebug("context:%p, fd:%d, is created, item:%p", pContext, fd, ppContext);
+ httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
// set the ref to 0
taosCacheRelease(tsHttpServer.contextCache, (void**)&ppContext, false);
@@ -122,16 +119,13 @@ HttpContext *httpCreateContext(int32_t fd) {
}
HttpContext *httpGetContext(void *ptr) {
- char contextStr[16] = {0};
- int32_t len = snprintf(contextStr, sizeof(contextStr), "%p", ptr);
-
- HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, contextStr, len);
-
+ HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &ptr, sizeof(HttpContext *));
+
if (ppContext) {
HttpContext *pContext = *ppContext;
if (pContext) {
int32_t refCount = atomic_add_fetch_32(&pContext->refCount, 1);
- httpDebug("context:%p, fd:%d, is accquired, refCount:%d", pContext, pContext->fd, refCount);
+ httpDebug("context:%p, fd:%d, is accquired, data:%p refCount:%d", pContext, pContext->fd, ppContext, refCount);
return pContext;
}
}
@@ -141,9 +135,10 @@ HttpContext *httpGetContext(void *ptr) {
void httpReleaseContext(HttpContext *pContext) {
int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1);
assert(refCount >= 0);
- httpDebug("context:%p, is releasd, refCount:%d", pContext, refCount);
HttpContext **ppContext = pContext->ppContext;
+ httpDebug("context:%p, is releasd, data:%p refCount:%d", pContext, ppContext, refCount);
+
if (tsHttpServer.contextCache != NULL) {
taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
} else {
diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c
index 6c82386d81..a5009c2347 100644
--- a/src/plugins/http/src/httpServer.c
+++ b/src/plugins/http/src/httpServer.c
@@ -85,6 +85,7 @@ bool httpReadDataImp(HttpContext *pContext) {
} else {
httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect",
pContext, pContext->fd, pContext->ipstr, errno);
+ httpReleaseContext(pContext);
return false;
}
} else {
@@ -153,6 +154,7 @@ static bool httpReadData(HttpContext *pContext) {
int ret = httpCheckReadCompleted(pContext);
if (ret == HTTP_CHECK_BODY_CONTINUE) {
//httpDebug("context:%p, fd:%d, ip:%s, not finished yet, wait another event", pContext, pContext->fd, pContext->ipstr);
+ httpReleaseContext(pContext);
return false;
} else if (ret == HTTP_CHECK_BODY_SUCCESS){
httpDebug("context:%p, fd:%d, ip:%s, thread:%s, read size:%d, dataLen:%d",
@@ -161,11 +163,13 @@ static bool httpReadData(HttpContext *pContext) {
return true;
} else {
httpNotifyContextClose(pContext);
+ httpReleaseContext(pContext);
return false;
}
} else {
httpError("context:%p, fd:%d, ip:%s, failed to read http body, close connect", pContext, pContext->fd, pContext->ipstr);
httpNotifyContextClose(pContext);
+ httpReleaseContext(pContext);
return false;
}
}
diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c
index 3a901167d5..256b0c9549 100644
--- a/src/plugins/http/src/httpSession.c
+++ b/src/plugins/http/src/httpSession.c
@@ -115,7 +115,7 @@ void httpCleanUpSessions() {
}
bool httpInitSessions() {
- tsHttpServer.sessionCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession);
+ tsHttpServer.sessionCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession, "rests");
if (tsHttpServer.sessionCache == NULL) {
httpError("failed to init session cache");
return false;
diff --git a/src/plugins/mqtt/src/mqttSystem.c b/src/plugins/mqtt/src/mqttSystem.c
index 3df62f8bf4..2687106124 100644
--- a/src/plugins/mqtt/src/mqttSystem.c
+++ b/src/plugins/mqtt/src/mqttSystem.c
@@ -111,7 +111,7 @@ void mqttStopSystem() {
}
void mqttCleanUpSystem() {
- mqttInfo("starting to clean up mqtt");
+ mqttInfo("starting to cleanup mqtt");
free(recntStatus.user_name);
free(recntStatus.password);
free(recntStatus.hostname);
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index d7d59230b6..3aa1b60be5 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -95,16 +95,13 @@ typedef struct SSingleColumnFilterInfo {
} SSingleColumnFilterInfo;
typedef struct STableQueryInfo { // todo merge with the STableQueryInfo struct
- int32_t tableIndex;
- int32_t groupIndex; // group id in table list
TSKEY lastKey;
- int32_t numOfRes;
+ int32_t groupIndex; // group id in table list
int16_t queryRangeSet; // denote if the query range is set, only available for interval query
int64_t tag;
STimeWindow win;
STSCursor cur;
- void* pTable; // for retrieve the page id list
-
+ void* pTable; // for retrieve the page id list
SWindowResInfo windowResInfo;
} STableQueryInfo;
@@ -127,11 +124,6 @@ typedef struct SQueryCostInfo {
uint64_t computTime;
} SQueryCostInfo;
-//typedef struct SGroupItem {
-// void *pTable;
-// STableQueryInfo *info;
-//} SGroupItem;
-
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
@@ -173,12 +165,12 @@ typedef struct SQueryRuntimeEnv {
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostInfo summary;
- bool stableQuery; // super table query or not
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
- SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ bool stableQuery; // super table query or not
bool topBotQuery; // false
int32_t prevGroupId; // previous executed group id
+ SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
} SQueryRuntimeEnv;
typedef struct SQInfo {
@@ -205,7 +197,8 @@ typedef struct SQInfo {
*/
int32_t tableIndex;
int32_t numOfGroupResultPages;
- _qinfo_free_fn_t fn;
+ _qinfo_free_fn_t freeFn;
+ jmp_buf env;
} SQInfo;
#endif // TDENGINE_QUERYEXECUTOR_H
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index f33d739ba1..1882aa1850 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -13,7 +13,10 @@
* along with this program. If not, see .
*/
#include "os.h"
+#include "tcache.h"
+#include "tglobal.h"
#include "qfill.h"
+#include "taosmsg.h"
#include "hash.h"
#include "qExecutor.h"
@@ -22,9 +25,8 @@
#include "qresultBuf.h"
#include "query.h"
#include "queryLog.h"
-#include "taosmsg.h"
#include "tlosertree.h"
-#include "tscUtil.h" // todo move the function to common module
+#include "exception.h"
#include "tscompression.h"
#include "ttime.h"
@@ -87,6 +89,19 @@ typedef struct {
STSCursor cur;
} SQueryStatusInfo;
+#if 0
+static UNUSED_FUNC void *u_malloc (size_t __size) {
+ uint32_t v = rand();
+ if (v % 5 <= 1) {
+ return NULL;
+ } else {
+ return malloc(__size);
+ }
+}
+
+#define malloc u_malloc
+#endif
+
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
@@ -1509,7 +1524,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
}
static bool isQueryKilled(SQInfo *pQInfo) {
- return false;
return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED);
}
@@ -2586,7 +2600,6 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
}
int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
-// int64_t maxOutput = 0;
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
@@ -2604,15 +2617,6 @@ int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
if (pResultInfo->numOfRes > 0) {
return pResultInfo->numOfRes;
}
-// if (pResultInfo != NULL && maxOutput < pResultInfo->numOfRes) {
-// maxOutput = pResultInfo->numOfRes;
-//
-// if (maxOutput > 0) {
-// break;
-// }
-// }
-//
-// assert(pResultInfo != NULL);
}
return 0;
@@ -2623,12 +2627,19 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
SQuery * pQuery = pRuntimeEnv->pQuery;
size_t size = taosArrayGetSize(pGroup);
-
tFilePage **buffer = pQuery->sdata;
- int32_t * posList = calloc(size, sizeof(int32_t));
+ int32_t* posList = calloc(size, sizeof(int32_t));
STableQueryInfo **pTableList = malloc(POINTER_BYTES * size);
+ if (pTableList == NULL || posList == NULL) {
+ tfree(posList);
+ tfree(pTableList);
+
+ qError("QInfo:%p failed alloc memory", pQInfo);
+ longjmp(pQInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
// todo opt for the case of one table per group
int32_t numOfTables = 0;
for (int32_t i = 0; i < size; ++i) {
@@ -4069,7 +4080,7 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) {
return pFillCol;
}
-int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery, void* freeParam, _qinfo_free_fn_t fn) {
+int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
int32_t code = TSDB_CODE_SUCCESS;
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
@@ -4083,8 +4094,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
pQInfo->tsdb = tsdb;
pQInfo->vgId = vgId;
- pQInfo->param = freeParam;
- pQInfo->fn = fn;
pRuntimeEnv->pQuery = pQuery;
pRuntimeEnv->pTSBuf = pTsBuf;
@@ -4333,7 +4342,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
taosArrayDestroy(s);
// here we simply set the first table as current table
- pQuery->current = (STableQueryInfo*) GET_TABLEGROUP(pQInfo, 0);
+ SArray* first = GET_TABLEGROUP(pQInfo, pQInfo->groupIndex);
+ pQuery->current = taosArrayGetP(first, 0);
+
scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
int64_t numOfRes = getNumOfResult(pRuntimeEnv);
@@ -4932,14 +4943,6 @@ static void tableQueryImpl(SQInfo *pQInfo) {
// record the total elapsed time
pRuntimeEnv->summary.elapsedTime += (taosGetTimestampUs() - st);
assert(pQInfo->tableqinfoGroupInfo.numOfTables == 1);
-
- /* check if query is killed or not */
- if (isQueryKilled(pQInfo)) {
- qDebug("QInfo:%p query is killed", pQInfo);
- } else {
- qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
- pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
- }
}
static void stableQueryImpl(SQInfo *pQInfo) {
@@ -4961,10 +4964,6 @@ static void stableQueryImpl(SQInfo *pQInfo) {
// record the total elapsed time
pQInfo->runtimeEnv.summary.elapsedTime += (taosGetTimestampUs() - st);
-
- if (pQuery->rec.rows == 0) {
- qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total);
- }
}
static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) {
@@ -5076,6 +5075,8 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p
*/
static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr,
char **tagCond, char** tbnameCond, SColIndex **groupbyCols, SColumnInfo** tagCols) {
+ int32_t code = TSDB_CODE_SUCCESS;
+
pQueryMsg->numOfTables = htonl(pQueryMsg->numOfTables);
pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
@@ -5102,7 +5103,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
// query msg safety check
if (!validateQueryMsg(pQueryMsg)) {
- return TSDB_CODE_QRY_INVALID_MSG;
+ code = TSDB_CODE_QRY_INVALID_MSG;
+ goto _cleanup;
}
char *pMsg = (char *)(pQueryMsg->colList) + sizeof(SColumnInfo) * pQueryMsg->numOfCols;
@@ -5174,7 +5176,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
int16_t functionId = pExprMsg->functionId;
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) {
if (pExprMsg->colInfo.flag != TSDB_COL_TAG) { // ignore the column index check for arithmetic expression.
- return TSDB_CODE_QRY_INVALID_MSG;
+ code = TSDB_CODE_QRY_INVALID_MSG;
+ goto _cleanup;
}
} else {
// if (!validateExprColumnInfo(pQueryMsg, pExprMsg)) {
@@ -5186,6 +5189,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
}
if (!validateQuerySourceCols(pQueryMsg, *pExpr)) {
+ code = TSDB_CODE_QRY_INVALID_MSG;
goto _cleanup;
}
@@ -5193,6 +5197,10 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns
*groupbyCols = malloc(pQueryMsg->numOfGroupCols * sizeof(SColIndex));
+ if (*groupbyCols == NULL) {
+ code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ goto _cleanup;
+ }
for (int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) {
(*groupbyCols)[i].colId = *(int16_t *)pMsg;
@@ -5248,7 +5256,13 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
if (*pMsg != 0) {
size_t len = strlen(pMsg) + 1;
+
*tbnameCond = malloc(len);
+ if (*tbnameCond == NULL) {
+ code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ goto _cleanup;
+ }
+
strcpy(*tbnameCond, pMsg);
pMsg += len;
}
@@ -5258,7 +5272,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->intervalTime,
pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);
- return 0;
+
+ return TSDB_CODE_SUCCESS;
_cleanup:
tfree(*pExpr);
@@ -5268,7 +5283,8 @@ _cleanup:
tfree(*groupbyCols);
tfree(*tagCols);
tfree(*tagCond);
- return TSDB_CODE_QRY_INVALID_MSG;
+
+ return code;
}
static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
@@ -5656,7 +5672,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window);
item->groupIndex = i;
- item->tableIndex = tableIndex++;
taosArrayPush(p1, &item);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
}
@@ -5670,7 +5685,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->window = pQueryMsg->window;
if (sem_init(&pQInfo->dataReady, 0, 0) != 0) {
- qError("QInfo:%p init dataReady sem failed, reason:%s", pQInfo, strerror(errno));
+ int32_t code = TAOS_SYSTEM_ERROR(errno);
+ qError("QInfo:%p init dataReady sem failed, reason:%s", pQInfo, tstrerror(code));
goto _cleanup;
}
@@ -5681,7 +5697,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
_cleanup:
freeQInfo(pQInfo);
-
return NULL;
}
@@ -5723,6 +5738,9 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
return TSDB_CODE_SUCCESS;
}
+ pQInfo->param = param;
+ pQInfo->freeFn = fn;
+
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
setQueryStatus(pQuery, QUERY_COMPLETED);
@@ -5732,7 +5750,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
}
// filter the qualified
- if ((code = doInitQInfo(pQInfo, pTSBuf, tsdb, vgId, isSTable, param, fn)) != TSDB_CODE_SUCCESS) {
+ if ((code = doInitQInfo(pQInfo, pTSBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -5786,7 +5804,7 @@ static void freeQInfo(SQInfo *pQInfo) {
// todo refactor, extract method to destroytableDataInfo
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroups; ++i) {
- SArray *p = GET_TABLEGROUP(pQInfo, i);;
+ SArray *p = GET_TABLEGROUP(pQInfo, i);
size_t num = taosArrayGetSize(p);
for(int32_t j = 0; j < num; ++j) {
@@ -5895,9 +5913,16 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
return TSDB_CODE_SUCCESS;
}
+typedef struct SQueryMgmt {
+ SCacheObj *qinfoPool; // query handle pool
+ int32_t vgId;
+ bool closed;
+ pthread_mutex_t lock;
+} SQueryMgmt;
+
int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, _qinfo_free_fn_t fn,
qinfo_t* pQInfo) {
- assert(pQueryMsg != NULL);
+ assert(pQueryMsg != NULL && tsdb != NULL);
int32_t code = TSDB_CODE_SUCCESS;
@@ -6032,19 +6057,19 @@ void qDestroyQueryInfo(qinfo_t qHandle) {
qDebug("QInfo:%p dec refCount, value:%d", pQInfo, ref);
if (ref == 0) {
- _qinfo_free_fn_t fn = pQInfo->fn;
+ _qinfo_free_fn_t freeFp = pQInfo->freeFn;
void* param = pQInfo->param;
doDestoryQueryInfo(pQInfo);
- if (fn != NULL) {
+ if (freeFp != NULL) {
assert(param != NULL);
- fn(param);
+ freeFp(param);
}
}
}
-void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
+void qTableQuery(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
if (pQInfo == NULL || pQInfo->signature != pQInfo) {
@@ -6054,17 +6079,34 @@ void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
if (isQueryKilled(pQInfo)) {
qDebug("QInfo:%p it is already killed, abort", pQInfo);
+
+ sem_post(&pQInfo->dataReady);
qDestroyQueryInfo(pQInfo);
return;
}
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table exists for query, abort", pQInfo);
+
+ sem_post(&pQInfo->dataReady);
+ qDestroyQueryInfo(pQInfo);
+ return;
+ }
+
+ int32_t ret = setjmp(pQInfo->env);
+ // error occurs, record the error code and return to client
+ if (ret != TSDB_CODE_SUCCESS) {
+ pQInfo->code = ret;
+ qDebug("QInfo:%p query abort due to error occurs, code:%s", pQInfo, tstrerror(pQInfo->code));
+ sem_post(&pQInfo->dataReady);
+ qDestroyQueryInfo(pQInfo);
+
return;
}
qDebug("QInfo:%p query task is launched", pQInfo);
+ SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) {
assert(pQInfo->runtimeEnv.pQueryHandle == NULL);
buildTagQueryResult(pQInfo); // todo support the limit/offset
@@ -6074,6 +6116,16 @@ void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
tableQueryImpl(pQInfo);
}
+ SQuery* pQuery = pRuntimeEnv->pQuery;
+ if (isQueryKilled(pQInfo)) {
+ qDebug("QInfo:%p query is killed", pQInfo);
+ } else if (pQuery->rec.rows == 0) {
+ qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total);
+ } else {
+ qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
+ pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
+ }
+
sem_post(&pQInfo->dataReady);
qDestroyQueryInfo(pQInfo);
}
@@ -6314,3 +6366,112 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
setQueryStatus(pQuery, QUERY_COMPLETED);
}
+void freeqinfoFn(void *qhandle) {
+ void** handle = qhandle;
+ if (handle == NULL || *handle == NULL) {
+ return;
+ }
+
+ qKillQuery(*handle);
+}
+
+void* qOpenQueryMgmt(int32_t vgId) {
+ const int32_t REFRESH_HANDLE_INTERVAL = 2; // every 2 seconds, refresh handle pool
+
+ char cacheName[128] = {0};
+ sprintf(cacheName, "qhandle_%d", vgId);
+
+ SQueryMgmt* pQueryHandle = calloc(1, sizeof(SQueryMgmt));
+
+ pQueryHandle->qinfoPool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName);
+ pQueryHandle->closed = false;
+ pthread_mutex_init(&pQueryHandle->lock, NULL);
+
+ qDebug("vgId:%d, open querymgmt success", vgId);
+ return pQueryHandle;
+}
+
+void qSetQueryMgmtClosed(void* pQMgmt) {
+ if (pQMgmt == NULL) {
+ return;
+ }
+
+ SQueryMgmt* pQueryMgmt = pQMgmt;
+ qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId);
+
+ pthread_mutex_lock(&pQueryMgmt->lock);
+ pQueryMgmt->closed = true;
+ pthread_mutex_unlock(&pQueryMgmt->lock);
+
+ taosCacheEmpty(pQueryMgmt->qinfoPool, true);
+}
+
+void qCleanupQueryMgmt(void* pQMgmt) {
+ if (pQMgmt == NULL) {
+ return;
+ }
+
+ SQueryMgmt* pQueryMgmt = pQMgmt;
+ int32_t vgId = pQueryMgmt->vgId;
+
+ assert(pQueryMgmt->closed);
+
+ SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool;
+ pQueryMgmt->qinfoPool = NULL;
+
+ taosCacheCleanup(pqinfoPool);
+ pthread_mutex_destroy(&pQueryMgmt->lock);
+ tfree(pQueryMgmt);
+
+ qDebug("vgId:%d querymgmt cleanup completed", vgId);
+}
+
+void** qRegisterQInfo(void* pMgmt, void* qInfo) {
+ if (pMgmt == NULL) {
+ return NULL;
+ }
+
+ SQueryMgmt *pQueryMgmt = pMgmt;
+ if (pQueryMgmt->qinfoPool == NULL) {
+ return NULL;
+ }
+
+ pthread_mutex_lock(&pQueryMgmt->lock);
+ if (pQueryMgmt->closed) {
+ pthread_mutex_unlock(&pQueryMgmt->lock);
+
+ return NULL;
+ } else {
+ void** handle = taosCachePut(pQueryMgmt->qinfoPool, qInfo, POINTER_BYTES, &qInfo, POINTER_BYTES, tsShellActivityTimer*2);
+ pthread_mutex_unlock(&pQueryMgmt->lock);
+
+ return handle;
+ }
+}
+
+void** qAcquireQInfo(void* pMgmt, void** key) {
+ SQueryMgmt *pQueryMgmt = pMgmt;
+
+ if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) {
+ return NULL;
+ }
+
+ void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, key, POINTER_BYTES);
+ if (handle == NULL || *handle == NULL) {
+ return NULL;
+ } else {
+ return handle;
+ }
+}
+
+void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree) {
+ SQueryMgmt *pQueryMgmt = pMgmt;
+
+ if (pQueryMgmt->qinfoPool == NULL) {
+ return NULL;
+ }
+
+ taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, needFree);
+ return 0;
+}
+
diff --git a/src/query/src/qast.c b/src/query/src/qast.c
index dc3b1499bb..721cd8ae5a 100644
--- a/src/query/src/qast.c
+++ b/src/query/src/qast.c
@@ -1173,9 +1173,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
size_t len = strlen(cond) + VARSTR_HEADER_SIZE;
char* p = exception_malloc(len);
- varDataSetLen(p, len - VARSTR_HEADER_SIZE);
- memcpy(varDataVal(p), cond, len);
-
+ STR_WITH_SIZE_TO_VARSTR(p, cond, len - VARSTR_HEADER_SIZE);
taosArrayPush(pVal->arr, &p);
}
diff --git a/src/query/src/qparserImpl.c b/src/query/src/qparserImpl.c
index 928b9eb873..d4ac540d2f 100644
--- a/src/query/src/qparserImpl.c
+++ b/src/query/src/qparserImpl.c
@@ -15,16 +15,16 @@
#include "os.h"
#include "qsqlparser.h"
+#include "queryLog.h"
#include "taosdef.h"
#include "taosmsg.h"
+#include "tcmdtype.h"
#include "tglobal.h"
#include "tstoken.h"
+#include "tstrbuild.h"
#include "ttime.h"
#include "ttokendef.h"
#include "tutil.h"
-#include "qsqltype.h"
-#include "tstrbuild.h"
-#include "queryLog.h"
SSqlInfo qSQLParse(const char *pStr) {
void *pParser = ParseAlloc(malloc);
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index e75802a98f..eafb052593 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -25,17 +25,17 @@
#include
/************ Begin %include sections from the grammar ************************/
+#include
+#include
#include
#include
#include
-#include
-#include
-#include "tutil.h"
#include "qsqlparser.h"
+#include "tcmdtype.h"
#include "tstoken.h"
-#include "tvariant.h"
#include "ttokendef.h"
-#include "qsqltype.h"
+#include "tutil.h"
+#include "tvariant.h"
/**************** End of %include directives **********************************/
/* These constants specify the various numeric values for terminal symbols
** in a format understandable to "makeheaders". This section is blank unless
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 9c9ac1699a..cbbf51d862 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -255,17 +255,46 @@ _err:
return NULL;
}
+static int32_t colIdCompar(const void* left, const void* right) {
+ int16_t colId = *(int16_t*) left;
+ STColumn* p2 = (STColumn*) right;
+
+ if (colId == p2->colId) {
+ return 0;
+ }
+
+ return (colId < p2->colId)? -1:1;
+}
+
int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
STsdbRepo *pRepo = (STsdbRepo *)repo;
STsdbMeta *pMeta = pRepo->tsdbMeta;
- int16_t tversion = htons(pMsg->tversion);
- STable *pTable = tsdbGetTableByUid(pMeta, htobe64(pMsg->uid));
+ pMsg->uid = htobe64(pMsg->uid);
+ pMsg->tid = htonl(pMsg->tid);
+ pMsg->tversion = htons(pMsg->tversion);
+ pMsg->colId = htons(pMsg->colId);
+ pMsg->tagValLen = htonl(pMsg->tagValLen);
+ pMsg->numOfTags = htons(pMsg->numOfTags);
+ pMsg->schemaLen = htonl(pMsg->schemaLen);
+ assert(pMsg->schemaLen == sizeof(STColumn) * pMsg->numOfTags);
+
+ char* d = pMsg->data;
+ for(int32_t i = 0; i < pMsg->numOfTags; ++i) {
+ STColumn* pCol = (STColumn*) d;
+ pCol->colId = htons(pCol->colId);
+ pCol->bytes = htons(pCol->bytes);
+ pCol->offset = 0;
+
+ d += sizeof(STColumn);
+ }
+
+ STable *pTable = tsdbGetTableByUid(pMeta, pMsg->uid);
if (pTable == NULL) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
- if (TABLE_TID(pTable) != htonl(pMsg->tid)) {
+ if (TABLE_TID(pTable) != pMsg->tid) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
@@ -277,10 +306,10 @@ int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
return -1;
}
- if (schemaVersion(tsdbGetTableTagSchema(pTable)) < tversion) {
+ if (schemaVersion(tsdbGetTableTagSchema(pTable)) < pMsg->tversion) {
tsdbDebug("vgId:%d server tag version %d is older than client tag version %d, try to config", REPO_ID(pRepo),
- schemaVersion(tsdbGetTableTagSchema(pTable)), tversion);
- void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, htonl(pMsg->tid));
+ schemaVersion(tsdbGetTableTagSchema(pTable)), pMsg->tversion);
+ void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pMsg->tid);
if (msg == NULL) return -1;
// Deal with error her
@@ -299,19 +328,24 @@ int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
STSchema *pTagSchema = tsdbGetTableTagSchema(pTable);
- if (schemaVersion(pTagSchema) > tversion) {
+ if (schemaVersion(pTagSchema) > pMsg->tversion) {
tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
"version %d",
- REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tversion, schemaVersion(pTable->tagSchema));
+ REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
return TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
}
- if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
+ if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
tsdbRemoveTableFromIndex(pMeta, pTable);
}
// TODO: remove table from index if it is the first column of tag
- tdSetKVRowDataOfCol(&pTable->tagVal, htons(pMsg->colId), htons(pMsg->type), pMsg->data);
- if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
+
+ // TODO: convert the tag schema from client, and then extract the type and bytes from schema according to colId
+ STColumn* res = bsearch(&pMsg->colId, pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar);
+ assert(res != NULL);
+
+ tdSetKVRowDataOfCol(&pTable->tagVal, pMsg->colId, res->type, pMsg->data + pMsg->schemaLen);
+ if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
tsdbAddTableIntoIndex(pMeta, pTable);
}
return TSDB_CODE_SUCCESS;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 19a022e0a7..6a9c8e1ff6 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -74,9 +74,6 @@ typedef struct STableCheckInfo {
SDataCols* pDataCols;
int32_t chosen; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
- SMemTable* mem; // in-mem buffer, hold the ref count
- SMemTable* imem; // imem buffer, hold the ref count to avoid release
-
SSkipListIterator* iter; // mem buffer skip list iterator
SSkipListIterator* iiter; // imem buffer skip list iterator
} STableCheckInfo;
@@ -113,6 +110,8 @@ typedef struct STsdbQueryHandle {
SFileGroupIter fileIter;
SRWHelper rhelper;
STableBlockInfo* pDataBlockInfo;
+ SMemTable* mem; // mem-table
+ SMemTable* imem; // imem-table, acquired from snapshot
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */
@@ -138,9 +137,6 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
}
TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
- // todo 1. filter not exist table
- // todo 2. add the reference count for each table that is involved in query
-
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
pQueryHandle->order = pCond->order;
pQueryHandle->window = pCond->twindow;
@@ -154,6 +150,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
+ tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
@@ -252,22 +249,22 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
pCheckInfo->initBuf = true;
int32_t order = pHandle->order;
- tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem);
+// tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem);
// no data in buffer, abort
- if (pCheckInfo->mem == NULL && pCheckInfo->imem == NULL) {
+ if (pHandle->mem == NULL && pHandle->imem == NULL) {
return false;
}
assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL);
- if (pCheckInfo->mem && pCheckInfo->mem->tData[pCheckInfo->tableId.tid] != NULL) {
- pCheckInfo->iter = tSkipListCreateIterFromVal(pCheckInfo->mem->tData[pCheckInfo->tableId.tid]->pData,
+ if (pHandle->mem && pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
+ pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
- if (pCheckInfo->imem && pCheckInfo->imem->tData[pCheckInfo->tableId.tid] != NULL) {
- pCheckInfo->iiter = tSkipListCreateIterFromVal(pCheckInfo->imem->tData[pCheckInfo->tableId.tid]->pData,
+ if (pHandle->imem && pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
+ pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
@@ -685,6 +682,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
// query ended in current block
if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) {
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
+ taosArrayDestroy(sa);
return false;
}
@@ -1504,6 +1502,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
pQueryHandle->window = pQueryHandle->cur.win;
pQueryHandle->cur.rows = 1;
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
+ taosArrayDestroy(sa);
return true;
} else {
STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
@@ -1518,7 +1517,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
pSecQueryHandle->outputCapacity = ((STsdbRepo*)pSecQueryHandle->pTsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pSecQueryHandle->rhelper, (STsdbRepo*) pSecQueryHandle->pTsdb);
-
+ tsdbTakeMemSnapshot(pSecQueryHandle->pTsdb, &pSecQueryHandle->mem, &pSecQueryHandle->imem);
+
// allocate buffer in order to load data blocks from file
int32_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
@@ -2083,26 +2083,15 @@ bool indexedNodeFilterFp(const void* pNode, void* param) {
STable* pTable = *(STable**)(SL_GET_NODE_DATA((SSkipListNode*)pNode));
char* val = NULL;
- int8_t type = pInfo->sch.type;
if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
val = (char*) pTable->name;
- type = TSDB_DATA_TYPE_BINARY;
} else {
val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
}
//todo :the val is possible to be null, so check it out carefully
- int32_t ret = 0;
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
- if (pInfo->optr == TSDB_RELATION_IN) {
- ret = pInfo->compare(val, pInfo->q);
- } else {
- ret = pInfo->compare(val, pInfo->q);
- }
- } else {
- ret = pInfo->compare(val, pInfo->q);
- }
+ int32_t ret = pInfo->compare(val, pInfo->q);
switch (pInfo->optr) {
case TSDB_RELATION_EQUAL: {
@@ -2271,7 +2260,9 @@ int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, STableGroupInfo* p
}
int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) {
- if (tsdbRLockRepoMeta(tsdb) < 0) goto _error;
+ if (tsdbRLockRepoMeta(tsdb) < 0) {
+ return terrno;
+ }
assert(pTableIdList != NULL);
size_t size = taosArrayGetSize(pTableIdList);
@@ -2297,15 +2288,15 @@ int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STa
taosArrayPush(group, &pTable);
}
- if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
+ if (tsdbUnlockRepoMeta(tsdb) < 0) {
+ taosArrayDestroy(group);
+ return terrno;
+ }
pGroupInfo->numOfTables = i;
taosArrayPush(pGroupInfo->pGroupList, &group);
return TSDB_CODE_SUCCESS;
-
- _error:
- return terrno;
}
void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
@@ -2319,9 +2310,6 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
tSkipListDestroyIter(pTableCheckInfo->iter);
- tsdbUnRefMemTable(pQueryHandle->pTsdb, pTableCheckInfo->mem);
- tsdbUnRefMemTable(pQueryHandle->pTsdb, pTableCheckInfo->imem);
-
if (pTableCheckInfo->pDataCols != NULL) {
tfree(pTableCheckInfo->pDataCols->buf);
}
@@ -2341,9 +2329,12 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
taosArrayDestroy(pQueryHandle->pColumns);
tfree(pQueryHandle->pDataBlockInfo);
tfree(pQueryHandle->statis);
-
+
+ // todo check error
+ tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->mem);
+ tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->imem);
+
tsdbDestroyHelper(&pQueryHandle->rhelper);
-
tfree(pQueryHandle);
}
diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h
index cd3d0d436f..b026ad4386 100644
--- a/src/util/inc/tcache.h
+++ b/src/util/inc/tcache.h
@@ -65,6 +65,7 @@ typedef struct {
int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included.
int64_t refreshTime;
STrashElem * pTrash;
+ char* name;
// void * tmrCtrl;
// void * pTimer;
SCacheStatis statistics;
@@ -90,7 +91,7 @@ typedef struct {
* @param fn free resource callback function
* @return
*/
-SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn);
+SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName);
/**
* initialize the cache object and set the free object callback function
@@ -98,7 +99,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
* @param freeCb
* @return
*/
-SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn);
+SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName);
/**
* add data into cache
@@ -128,7 +129,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
* @param expireTime new expire time of data
* @return
*/
-void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, size_t keyLen, uint64_t expireTime);
+void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime);
/**
* Add one reference count for the exist data, and assign this data for a new owner.
@@ -162,8 +163,9 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove);
/**
* move all data node into trash, clear node in trash can if it is not referenced by any clients
* @param handle
+ * @param _remove remove the data or not if refcount is greater than 0
*/
-void taosCacheEmpty(SCacheObj *pCacheObj);
+void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove);
/**
* release all allocated memory and destroy the cache object.
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index d763472a12..d546970868 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -118,8 +118,9 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
-
- uDebug("key:%p, is removed from cache,total:%" PRId64 ",size:%dbytes", pNode->key, pCacheObj->totalSize, size);
+
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes",
+ pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode);
}
@@ -224,7 +225,7 @@ static void doCleanupDataCache(SCacheObj *pCacheObj);
*/
static void* taosCacheRefresh(void *handle);
-SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn) {
+SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
if (refreshTimeInSeconds <= 0) {
return NULL;
}
@@ -236,6 +237,7 @@ SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bo
}
pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false);
+ pCacheObj->name = strdup(cacheName);
if (pCacheObj->pHashTable == NULL) {
free(pCacheObj);
uError("failed to allocate memory, reason:%s", strerror(errno));
@@ -265,10 +267,6 @@ SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bo
return pCacheObj;
}
-SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn) {
- return taosCacheInitWithCb(keyType, refreshTimeInSeconds, extendLifespan, fn);
-}
-
void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) {
SCacheDataNode *pNode;
@@ -284,19 +282,21 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
pNode = taosAddToCacheImpl(pCacheObj, key, keyLen, pData, dataSize, duration * 1000L);
if (NULL != pNode) {
pCacheObj->totalSize += pNode->size;
-
- uDebug("key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%" PRId64 ", size:%" PRId64 " bytes",
- key, pNode, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime), pCacheObj->totalSize, dataSize);
+
+ uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64
+ "bytes size:%" PRId64 "bytes",
+ pCacheObj->name, key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime),
+ (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize);
} else {
- uError("key:%p, failed to added into cache, out of memory", key);
+ uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key);
}
} else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
- uDebug("key:%p, %p exist in cache, updated", key, pNode);
+ uDebug("cache:%s, key:%p, %p exist in cache, updated", pCacheObj->name, key, pNode->data);
}
-
+
__cache_unlock(pCacheObj);
-
+
return (pNode != NULL) ? pNode->data : NULL;
}
@@ -327,17 +327,17 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
- uDebug("key:%p, is retrieved from cache, %p refcnt:%d", key, (*ptNode), ref);
+ uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, (*ptNode)->data, ref);
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
- uDebug("key:%p, not in cache, retrieved failed", key);
+ uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
return (ptNode != NULL) ? (*ptNode)->data : NULL;
}
-void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, size_t keyLen, uint64_t expireTime) {
+void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) {
if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) {
return NULL;
}
@@ -350,17 +350,18 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, siz
(*ptNode)->extendFactor += 1;
// (*ptNode)->lifespan = expireTime;
}
-
+
__cache_unlock(pCacheObj);
-
+
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
- uDebug("key:%p, expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
+ uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key,
+ (*ptNode)->data, T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
- uDebug("key:%p, not in cache, retrieved failed", key);
+ uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
}
-
+
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
return (ptNode != NULL) ? (*ptNode)->data : NULL;
}
@@ -375,9 +376,9 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
uError("key: %p the data from cache is invalid", ptNode);
return NULL;
}
-
+
int32_t ref = T_REF_INC(ptNode);
- uDebug("%p acquired by data in cache, refcnt:%d", ptNode, ref)
+ uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref);
// if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan
if (pCacheObj->extendLifespan) {
@@ -385,7 +386,8 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) {
ptNode->extendFactor += 1;
- uDebug("key:%p extend life time to %"PRId64, ptNode, ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime);
+ uDebug("cache:%s, %p extend life time to %" PRId64, pCacheObj->name, ptNode->data,
+ ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime);
}
}
@@ -424,14 +426,14 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset);
if (pNode->signature != (uint64_t)pNode) {
- uError("key:%p, release invalid cache data", pNode);
+ uError("%p, release invalid cache data", pNode);
return;
}
-
+
*data = NULL;
int16_t ref = T_REF_DEC(pNode);
- uDebug("%p data released, refcnt:%d", pNode, ref);
-
+ uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
+
if (_remove && (!pNode->inTrashCan)) {
__cache_wr_lock(pCacheObj);
@@ -448,7 +450,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
}
}
-void taosCacheEmpty(SCacheObj *pCacheObj) {
+void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
__cache_wr_lock(pCacheObj);
@@ -458,12 +460,16 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
}
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
- taosCacheMoveToTrash(pCacheObj, pNode);
+ if (T_REF_VAL_GET(pNode) == 0 || _remove) {
+ taosCacheReleaseNode(pCacheObj, pNode);
+ } else {
+ taosCacheMoveToTrash(pCacheObj, pNode);
+ }
}
__cache_unlock(pCacheObj);
taosHashDestroyIter(pIter);
- taosTrashCanEmpty(pCacheObj, false);
+ taosTrashCanEmpty(pCacheObj, _remove);
}
void taosCacheCleanup(SCacheObj *pCacheObj) {
@@ -474,6 +480,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1;
pthread_join(pCacheObj->refreshWorker, NULL);
+ uInfo("cache:%s will be cleaned up", pCacheObj->name);
doCleanupDataCache(pCacheObj);
}
@@ -522,7 +529,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pNode->inTrashCan = true;
pCacheObj->numOfElemsInTrash++;
- uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
+ uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash);
}
void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
@@ -547,7 +554,6 @@ void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
pCacheObj->freeFp(pElem->pData->data);
}
- uError("-------------------free obj:%p", pElem->pData);
free(pElem->pData);
free(pElem);
}
@@ -574,7 +580,7 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
}
if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
- uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
+ uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData->data,
pCacheObj->numOfElemsInTrash - 1);
STrashElem *p = pElem;
@@ -594,21 +600,25 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
- // if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
- if (T_REF_VAL_GET(pNode) <= 0) {
+
+ int32_t c = T_REF_VAL_GET(pNode);
+ if (c <= 0) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
- uDebug("key:%p, will not remove from cache, refcnt:%d", pNode->key, T_REF_VAL_GET(pNode));
+ uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key,
+ pNode->data, T_REF_VAL_GET(pNode));
}
}
taosHashDestroyIter(pIter);
- taosHashCleanup(pCacheObj->pHashTable);
+ // todo memory leak if there are object with refcount greater than 0 in hash table?
+ taosHashCleanup(pCacheObj->pHashTable);
__cache_unlock(pCacheObj);
taosTrashCanEmpty(pCacheObj, true);
__cache_lock_destroy(pCacheObj);
-
+
+ tfree(pCacheObj->name);
memset(pCacheObj, 0, sizeof(SCacheObj));
free(pCacheObj);
}
diff --git a/src/util/src/tmem.c b/src/util/src/tmem.c
index ec5f90990b..9c512ad8dc 100644
--- a/src/util/src/tmem.c
+++ b/src/util/src/tmem.c
@@ -193,7 +193,7 @@ static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint3
return malloc_detect_leak(size, file, line);
}
- SMemBlock* blk = ((char*)ptr) - sizeof(SMemBlock);
+  SMemBlock* blk = (SMemBlock *)((char*)ptr - sizeof(SMemBlock));
if (blk->magic != MEMBLK_MAGIC) {
if (fpAllocLog != NULL) {
fprintf(fpAllocLog, "%s:%d: memory is allocated by default allocator.\n", file, line);
diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c
index 7fb9738ec7..6f67c4a136 100644
--- a/src/util/src/ttime.c
+++ b/src/util/src/ttime.c
@@ -374,3 +374,34 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
}
+
+// internal function, when program is paused in debugger,
+// one can call this function from debugger to print a
+// timestamp as human readable string, for example (gdb):
+// p fmtts(1593769722)
+// outputs:
+// 2020-07-03 17:48:42
+// and the parameter can also be a variable.
+const char* fmtts(int64_t ts) {
+ static char buf[32];
+
+ time_t tt;
+ if (ts > -62135625943 && ts < 32503651200) {
+ tt = ts;
+ } else if (ts > -62135625943000 && ts < 32503651200000) {
+ tt = ts / 1000;
+ } else {
+ tt = ts / 1000000;
+ }
+
+ struct tm* ptm = localtime(&tt);
+ size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
+
+ if (ts <= -62135625943000 || ts >= 32503651200000) {
+ sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
+ } else if (ts <= -62135625943 || ts >= 32503651200) {
+ sprintf(buf + pos, ".%03d", (int)(ts % 1000));
+ }
+
+ return buf;
+}
\ No newline at end of file
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index 9c5bffef95..1a74359f47 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -801,6 +801,11 @@ int tmkdir(const char *path, mode_t mode) {
}
void taosMvDir(char* destDir, char *srcDir) {
+ if (0 == tsEnableVnodeBak) {
+ uInfo("vnode backup not enabled");
+ return;
+ }
+
char shellCmd[1024+1] = {0};
//(void)snprintf(shellCmd, 1024, "cp -rf %s %s", srcDir, destDir);
diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp
index 43ac689ff4..9100b7e7f6 100644
--- a/src/util/tests/cacheTest.cpp
+++ b/src/util/tests/cacheTest.cpp
@@ -19,7 +19,7 @@ int32_t tsMaxMeterConnections = 200;
// test cache
TEST(testCase, client_cache_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- SCacheObj* tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL);
+ SCacheObj* tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL, "test");
const char* key1 = "test1";
char data1[] = "test11";
@@ -105,7 +105,7 @@ TEST(testCase, client_cache_test) {
TEST(testCase, cache_resize_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- auto* pCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, false, NULL);
+ auto* pCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, false, NULL, "test");
char key[256] = {0};
char data[1024] = "abcdefghijk";
diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h
index 76e53f3962..4f22c7784d 100644
--- a/src/vnode/inc/vnodeInt.h
+++ b/src/vnode/inc/vnodeInt.h
@@ -53,7 +53,7 @@ typedef struct {
STsdbCfg tsdbCfg;
SSyncCfg syncCfg;
SWalCfg walCfg;
- void *qHandlePool; // query handle pool
+ void *qMgmt;
char *rootDir;
char db[TSDB_DB_NAME_LEN];
} SVnodeObj;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 192998c8a6..186c874e26 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -46,7 +46,6 @@ static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uin
static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
static void vnodeNotifyRole(void *ahandle, int8_t role);
static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion);
-static void vnodeFreeqHandle(void* phandle);
static pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT;
@@ -69,6 +68,12 @@ static void vnodeInit() {
}
}
+void vnodeCleanupResources() {
+ taosHashCleanup(tsDnodeVnodesHash);
+ vnodeModuleInit = PTHREAD_ONCE_INIT;
+ tsDnodeVnodesHash = NULL;
+}
+
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
int32_t code;
pthread_once(&vnodeModuleInit, vnodeInit);
@@ -283,9 +288,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
cqStart(pVnode->cq);
- const int32_t REFRESH_HANDLE_INTERVAL = 2; // every 2 seconds, rfresh handle pool
- pVnode->qHandlePool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, vnodeFreeqHandle);
-
+ pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId);
pVnode->events = NULL;
pVnode->status = TAOS_VN_STATUS_READY;
vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
@@ -324,10 +327,13 @@ void vnodeRelease(void *pVnodeRaw) {
assert(refCount >= 0);
if (refCount > 0) {
- vTrace("vgId:%d, release vnode, refCount:%d", vgId, refCount);
+ vDebug("vgId:%d, release vnode, refCount:%d", vgId, refCount);
return;
}
+ qCleanupQueryMgmt(pVnode->qMgmt);
+ pVnode->qMgmt = NULL;
+
if (pVnode->tsdb)
tsdbCloseRepo(pVnode->tsdb, 1);
pVnode->tsdb = NULL;
@@ -362,12 +368,6 @@ void vnodeRelease(void *pVnodeRaw) {
int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1);
vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count);
-
- if (count <= 0) {
- taosHashCleanup(tsDnodeVnodesHash);
- vnodeModuleInit = PTHREAD_ONCE_INIT;
- tsDnodeVnodesHash = NULL;
- }
}
void *vnodeGetVnode(int32_t vgId) {
@@ -388,11 +388,20 @@ void *vnodeAccquireVnode(int32_t vgId) {
if (pVnode == NULL) return pVnode;
atomic_add_fetch_32(&pVnode->refCount, 1);
- vTrace("vgId:%d, get vnode, refCount:%d", pVnode->vgId, pVnode->refCount);
+ vDebug("vgId:%d, get vnode, refCount:%d", pVnode->vgId, pVnode->refCount);
return pVnode;
}
+void *vnodeAccquireRqueue(void *param) {
+ SVnodeObj *pVnode = param;
+ if (pVnode == NULL) return NULL;
+
+ atomic_add_fetch_32(&pVnode->refCount, 1);
+ vDebug("vgId:%d, get vnode rqueue, refCount:%d", pVnode->vgId, pVnode->refCount);
+ return ((SVnodeObj *)pVnode)->rqueue;
+}
+
void *vnodeGetRqueue(void *pVnode) {
return ((SVnodeObj *)pVnode)->rqueue;
}
@@ -424,6 +433,28 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
pLoad->replica = pVnode->syncCfg.replica;
}
+int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
+ if (tsDnodeVnodesHash == NULL) return TSDB_CODE_SUCCESS;
+
+ SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
+ while (taosHashIterNext(pIter)) {
+ SVnodeObj **pVnode = taosHashIterGet(pIter);
+ if (pVnode == NULL) continue;
+ if (*pVnode == NULL) continue;
+
+ (*numOfVnodes)++;
+ if (*numOfVnodes >= TSDB_MAX_VNODES) {
+ vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES);
+ continue;
+ } else {
+ vnodeList[*numOfVnodes - 1] = (*pVnode)->vgId;
+ }
+ }
+
+ taosHashDestroyIter(pIter);
+ return TSDB_CODE_SUCCESS;
+}
+
void vnodeBuildStatusMsg(void *param) {
SDMStatusMsg *pStatus = param;
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
@@ -466,6 +497,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount);
// release local resources only after cutting off outside connections
+ qSetQueryMgmtClosed(pVnode->qMgmt);
vnodeRelease(pVnode);
}
@@ -871,12 +903,3 @@ PARSE_OVER:
if(fp) fclose(fp);
return terrno;
}
-
-void vnodeFreeqHandle(void *qHandle) {
- void** handle = qHandle;
- if (handle == NULL || *handle == NULL) {
- return;
- }
-
- qKillQuery(*handle);
-}
\ No newline at end of file
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index 6b157a0367..2ca69a3ddb 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -14,6 +14,7 @@
*/
#define _DEFAULT_SOURCE
+#include
#include "os.h"
#include "tglobal.h"
@@ -73,20 +74,23 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
killQueryMsg->free = htons(killQueryMsg->free);
killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
- vWarn("QInfo:%p connection %p broken, kill query", (void*)killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
- assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
-
- // this message arrived here by means of the *query* message, so release the vnode is necessary
- void** qhandle = taosCacheAcquireByKey(pVnode->qHandlePool, (void*) &killQueryMsg->qhandle, sizeof(killQueryMsg->qhandle));
- if (qhandle == NULL || *qhandle == NULL) { // todo handle invalid qhandle error
-
- } else {
-// qKillQuery((qinfo_t) killQueryMsg->qhandle);
- taosCacheRelease(pVnode->qHandlePool, (void**) &qhandle, true);
+ void* handle = NULL;
+ if ((void**) killQueryMsg->qhandle != NULL) {
+ handle = *(void**) killQueryMsg->qhandle;
}
- vnodeRelease(pVnode);
- return TSDB_CODE_TSC_QUERY_CANCELLED; // todo change the error code
+ vWarn("QInfo:%p connection %p broken, kill query", handle, pReadMsg->rpcMsg.handle);
+ assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
+
+ void** qhandle = qAcquireQInfo(pVnode->qMgmt, (void**) killQueryMsg->qhandle);
+ if (qhandle == NULL || *qhandle == NULL) {
+ vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
+ } else {
+ assert(qhandle == (void**) killQueryMsg->qhandle);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true);
+ }
+
+ return TSDB_CODE_TSC_QUERY_CANCELLED;
}
int32_t code = TSDB_CODE_SUCCESS;
@@ -94,48 +98,58 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
void** handle = NULL;
if (contLen != 0) {
- code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, vnodeRelease, &pQInfo);
+ code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo);
SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp));
- pRsp->qhandle = htobe64((uint64_t) (pQInfo));
- pRsp->code = code;
+ pRsp->code = code;
+ pRsp->qhandle = 0;
pRet->len = sizeof(SQueryTableRsp);
pRet->rsp = pRsp;
+ int32_t vgId = pVnode->vgId;
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
- if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId) != TSDB_CODE_SUCCESS) {
- vError("vgId:%d, QInfo:%p, dnode query discarded since link is broken, %p", pVnode->vgId, pQInfo,
- pReadMsg->rpcMsg.handle);
- pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ // add lock here
+ handle = qRegisterQInfo(pVnode->qMgmt, pQInfo);
+ if (handle == NULL) { // failed to register qhandle
+ pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
- // NOTE: there two refcount, needs to kill twice, todo refactor
- // query has not been put into qhandle pool, kill it directly.
qKillQuery(pQInfo);
qKillQuery(pQInfo);
-
- return pRsp->code;
+ } else {
+ assert(*handle == pQInfo);
+ pRsp->qhandle = htobe64((uint64_t) (handle));
}
- handle = taosCachePut(pVnode->qHandlePool, pQInfo, sizeof(pQInfo), &pQInfo, sizeof(pQInfo), tsShellActivityTimer * 2);
- assert(*handle == pQInfo);
+ if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
+ vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
+ pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+
+ // NOTE: there two refcount, needs to kill twice
+ // query has not been put into qhandle pool, kill it directly.
+ qKillQuery(pQInfo);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
+ return pRsp->code;
+ }
} else {
assert(pQInfo == NULL);
- vnodeRelease(pVnode);
}
- vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", pVnode->vgId, pQInfo);
+ vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
} else {
assert(pCont != NULL);
- pQInfo = pCont;
+ pQInfo = *(void**)(pCont);
+ handle = pCont;
code = TSDB_CODE_VND_ACTION_IN_PROGRESS;
+
vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, pQInfo);
}
if (pQInfo != NULL) {
- qTableQuery(pQInfo, vnodeRelease, pVnode); // do execute query
- taosCacheRelease(pVnode->qHandlePool, (void**) &handle, false);
+ qTableQuery(pQInfo); // do execute query
+ assert(handle != NULL);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
}
return code;
@@ -146,55 +160,57 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
SRspRet *pRet = &pReadMsg->rspRet;
SRetrieveTableMsg *pRetrieve = pCont;
- void *pQInfo = (void*) htobe64(pRetrieve->qhandle);
+ void **pQInfo = (void*) htobe64(pRetrieve->qhandle);
pRetrieve->free = htons(pRetrieve->free);
+ vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *pQInfo);
+
memset(pRet, 0, sizeof(SRspRet));
int32_t ret = 0;
- void** handle = taosCacheAcquireByKey(pVnode->qHandlePool, &pQInfo, sizeof(pQInfo));
- if (handle == NULL || *handle != pQInfo) {
+ void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo);
+ if (handle == NULL || handle != pQInfo) {
ret = TSDB_CODE_QRY_INVALID_QHANDLE;
}
if (pRetrieve->free == 1) {
- vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
+ if (ret == TSDB_CODE_SUCCESS) {
+ vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
- taosCacheRelease(pVnode->qHandlePool, handle, true);
-// int32_t ret = qKillQuery(pQInfo);
-
- pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
- pRet->len = sizeof(SRetrieveTableRsp);
-
- memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
- SRetrieveTableRsp* pRsp = pRet->rsp;
- pRsp->numOfRows = 0;
- pRsp->completed = true;
- pRsp->useconds = 0;
+ pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
+ pRet->len = sizeof(SRetrieveTableRsp);
+ memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
+ SRetrieveTableRsp* pRsp = pRet->rsp;
+ pRsp->numOfRows = 0;
+ pRsp->completed = true;
+ pRsp->useconds = 0;
+ } else { // todo handle error
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
+ }
return ret;
}
- vDebug("vgId:%d, QInfo:%p, retrieve msg is received", pVnode->vgId, pQInfo);
-
- int32_t code = qRetrieveQueryResultInfo(pQInfo);
- if (code != TSDB_CODE_SUCCESS) {
+ int32_t code = qRetrieveQueryResultInfo(*pQInfo);
+ if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) {
//TODO
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
+
} else {
// todo check code and handle error in build result set
- code = qDumpRetrieveResult(pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
+ code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
- if (qHasMoreResultsToRetrieve(pQInfo)) {
- pRet->qhandle = pQInfo;
- code = TSDB_CODE_VND_ACTION_NEED_REPROCESSED;
+ if (qHasMoreResultsToRetrieve(*handle)) {
+ dnodePutQhandleIntoReadQueue(pVnode, handle);
+ pRet->qhandle = handle;
+ code = TSDB_CODE_SUCCESS;
} else { // no further execution invoked, release the ref to vnode
- taosCacheRelease(pVnode->qHandlePool, (void**) &handle, true);
-// qDestroyQueryInfo(pQInfo);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
}
}
-
+
return code;
}
diff --git a/tests/pytest/import_merge/importDataLastSub.py b/tests/pytest/import_merge/importDataLastSub.py
index 5c2069c90f..bfcad2d252 100644
--- a/tests/pytest/import_merge/importDataLastSub.py
+++ b/tests/pytest/import_merge/importDataLastSub.py
@@ -32,6 +32,7 @@ class TDTestCase:
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
+ tdLog.sleep(5)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
@@ -61,6 +62,7 @@ class TDTestCase:
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
+ tdLog.sleep(5)
tdLog.info("================= step5")
tdLog.info("import 10 data totally repetitive")
diff --git a/tests/pytest/query/queryMetaData.py b/tests/pytest/query/queryMetaData.py
index 8fb9d9bf3a..7b95e4a81c 100755
--- a/tests/pytest/query/queryMetaData.py
+++ b/tests/pytest/query/queryMetaData.py
@@ -22,7 +22,7 @@ class MetadataQuery:
def initConnection(self):
self.tables = 100000
self.records = 10
- self.numOfTherads = 10
+ self.numOfTherads = 20
self.ts = 1537146000000
self.host = "127.0.0.1"
self.user = "root"
@@ -55,10 +55,10 @@ class MetadataQuery:
def createTablesAndInsertData(self, threadID):
cursor = self.connectDB()
- cursor.execute("use test")
- base = threadID * self.tables
+ cursor.execute("use test")
tablesPerThread = int (self.tables / self.numOfTherads)
+ base = threadID * tablesPerThread
for i in range(tablesPerThread):
cursor.execute(
'''create table t%d using meters tags(
@@ -75,12 +75,11 @@ class MetadataQuery:
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
- for j in range(self.records):
- cursor.execute(
- "insert into t%d values(%d, %d)" %
- (base + i + 1, self.ts + j, j))
- cursor.close()
- self.conn.close()
+
+ cursor.execute(
+ "insert into t%d values(%d, 1) (%d, 2) (%d, 3) (%d, 4) (%d, 5)" %
+ (base + i + 1, self.ts + 1, self.ts + 2, self.ts + 3, self.ts + 4, self.ts + 5))
+ cursor.close()
def queryData(self, query):
cursor = self.connectDB()
@@ -108,12 +107,17 @@ if __name__ == '__main__':
print(
"================= Create %d tables and insert %d records into each table =================" %
(t.tables, t.records))
- startTime = datetime.now()
+ startTime = datetime.now()
+ threads = []
for i in range(t.numOfTherads):
thread = threading.Thread(
target=t.createTablesAndInsertData, args=(i,))
thread.start()
- thread.join()
+ threads.append(thread)
+
+ for th in threads:
+ th.join()
+
endTime = datetime.now()
diff = (endTime - startTime).seconds
print(
diff --git a/tests/pytest/query/queryMetaPerformace.py b/tests/pytest/query/queryMetaPerformace.py
new file mode 100644
index 0000000000..0570311b08
--- /dev/null
+++ b/tests/pytest/query/queryMetaPerformace.py
@@ -0,0 +1,149 @@
+
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+import threading
+import time
+from datetime import datetime
+import numpy as np
+
+class MyThread(threading.Thread):
+
+ def __init__(self, func, args=()):
+ super(MyThread, self).__init__()
+ self.func = func
+ self.args = args
+
+ def run(self):
+ self.result = self.func(*self.args)
+
+ def get_result(self):
+ try:
+ return self.result # accessing self.result may raise AttributeError if the thread was not joined before run() finished
+ except Exception:
+ return None
+
+class MetadataQuery:
+ def initConnection(self):
+ self.tables = 100
+ self.records = 10
+ self.numOfTherads =5
+ self.ts = 1537146000000
+ self.host = "127.0.0.1"
+ self.user = "root"
+ self.password = "taosdata"
+ self.config = "/etc/taos"
+ self.conn = taos.connect( self.host, self.user, self.password, self.config)
+ def connectDB(self):
+ return self.conn.cursor()
+
+ def createStable(self):
+ print("================= Create stable meters =================")
+ cursor = self.connectDB()
+ cursor.execute("drop database if exists test")
+ cursor.execute("create database test")
+ cursor.execute("use test")
+ cursor.execute('''create table if not exists meters (ts timestamp, speed int) tags(
+ tgcol1 tinyint, tgcol2 smallint, tgcol3 int, tgcol4 bigint, tgcol5 float, tgcol6 double, tgcol7 bool, tgcol8 binary(20), tgcol9 nchar(20),
+ tgcol10 tinyint, tgcol11 smallint, tgcol12 int, tgcol13 bigint, tgcol14 float, tgcol15 double, tgcol16 bool, tgcol17 binary(20), tgcol18 nchar(20),
+ tgcol19 tinyint, tgcol20 smallint, tgcol21 int, tgcol22 bigint, tgcol23 float, tgcol24 double, tgcol25 bool, tgcol26 binary(20), tgcol27 nchar(20),
+ tgcol28 tinyint, tgcol29 smallint, tgcol30 int, tgcol31 bigint, tgcol32 float, tgcol33 double, tgcol34 bool, tgcol35 binary(20), tgcol36 nchar(20),
+ tgcol37 tinyint, tgcol38 smallint, tgcol39 int, tgcol40 bigint, tgcol41 float, tgcol42 double, tgcol43 bool, tgcol44 binary(20), tgcol45 nchar(20),
+ tgcol46 tinyint, tgcol47 smallint, tgcol48 int, tgcol49 bigint, tgcol50 float, tgcol51 double, tgcol52 bool, tgcol53 binary(20), tgcol54 nchar(20))''')
+ cursor.close()
+
+ def createTablesAndInsertData(self, threadID):
+ cursor = self.connectDB()
+ cursor.execute("use test")
+ base = threadID * self.tables
+
+ tablesPerThread = int (self.tables / self.numOfTherads)
+ for i in range(tablesPerThread):
+ cursor.execute(
+ '''create table t%d using meters tags(
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' %
+ (base + i + 1,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
+ for j in range(self.records):
+ cursor.execute(
+ "insert into t%d values(%d, %d)" %
+ (base + i + 1, self.ts + j, j))
+ cursor.close()
+ def queryWithTagId(self, threadId, tagId, queryNum):
+ print("---------thread%d start-----------"%threadId)
+ query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9,
+ tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18,
+ tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27,
+ tgcol28, tgcol29, tgcol30, tgcol31, tgcol32, tgcol33, tgcol34, tgcol35, tgcol36,
+ tgcol37, tgcol38, tgcol39, tgcol40, tgcol41, tgcol42, tgcol43, tgcol44, tgcol45,
+ tgcol46, tgcol47, tgcol48, tgcol49, tgcol50, tgcol51, tgcol52, tgcol53, tgcol54
+ from meters where tgcol{id} > {condition}'''
+ latancy = []
+ cursor = self.connectDB()
+ cursor.execute("use test")
+ for i in range(queryNum):
+ startTime = time.time()
+ cursor.execute(query.format(id = tagId, condition = i))
+ cursor.fetchall()
+ latancy.append((time.time() - startTime))
+ print("---------thread%d end-----------"%threadId)
+ return latancy
+ def queryData(self, query):
+ cursor = self.connectDB()
+ cursor.execute("use test")
+
+ print("================= query tag data =================")
+ startTime = datetime.now()
+ cursor.execute(query)
+ cursor.fetchall()
+ endTime = datetime.now()
+ print(
+ "Query time for the above query is %d seconds" %
+ (endTime - startTime).seconds)
+
+ cursor.close()
+ #self.conn.close()
+
+
+if __name__ == '__main__':
+
+ t = MetadataQuery()
+ t.initConnection()
+
+ latancys = []
+ threads = []
+ tagId = 1
+ queryNum = 1000
+ for i in range(t.numOfTherads):
+ thread = MyThread(t.queryWithTagId, args = (i, tagId, queryNum))
+ threads.append(thread)
+ thread.start()
+ for i in range(t.numOfTherads):
+ threads[i].join()
+ latancys.extend(threads[i].get_result())
+ print("Total query: %d"%(queryNum * t.numOfTherads))
+ print("statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f"
+ %(sum(latancys)/(queryNum * t.numOfTherads), np.percentile(latancys, 50), np.percentile(latancys, 75), np.percentile(latancys, 95), np.percentile(latancys, 99)))
+
diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh
index 8ce1fd1294..eada5f67f7 100755
--- a/tests/pytest/regressiontest.sh
+++ b/tests/pytest/regressiontest.sh
@@ -137,6 +137,7 @@ python3 ./test.py -f query/filterFloatAndDouble.py
python3 ./test.py -f query/filterOtherTypes.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/querySort.py
+python3 ./test.py -f query/queryJoin.py
#stream
python3 ./test.py -f stream/stream1.py
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index 5b35563e1b..a9da8e5671 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -81,7 +81,7 @@ if __name__ == "__main__":
else:
toBeKilled = "valgrind.bin"
- killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled
+ killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
@@ -91,8 +91,17 @@ if __name__ == "__main__":
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
- fuserCmd = "fuser -k -n tcp 6030"
- os.system(fuserCmd)
+ for port in range(6030, 6041):
+ usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
+ processID = subprocess.check_output(usePortPID, shell=True)
+
+ if processID:
+ killCmd = "kill -9 %s" % processID
+ os.system(killCmd)
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+ if valgrind:
+ time.sleep(2)
tdLog.info('stop All dnodes')
sys.exit(0)
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index e24af473f3..370af1ba13 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -251,11 +251,16 @@ class TDDnode:
psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -INT %s" % processID
+ killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
+ for port in range(6030, 6041):
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+ if self.valgrind:
+ time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
@@ -272,11 +277,16 @@ class TDDnode:
psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
+ for port in range(6030, 6041):
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+ if self.valgrind:
+ time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
@@ -325,7 +335,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@@ -334,7 +344,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@@ -440,7 +450,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@@ -449,7 +459,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index e2ebd9af63..3c4733a25b 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -365,3 +365,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
diff --git a/tests/script/jenkins/unique.txt b/tests/script/jenkins/unique.txt
index afd0ea55c0..06edb8890a 100644
--- a/tests/script/jenkins/unique.txt
+++ b/tests/script/jenkins/unique.txt
@@ -133,3 +133,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh
index 98b402f4ba..eb0a9b526d 100755
--- a/tests/script/sh/deploy.sh
+++ b/tests/script/sh/deploy.sh
@@ -114,10 +114,11 @@ echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "dDebugFlag 135" >> $TAOS_CFG
echo "vDebugFlag 135" >> $TAOS_CFG
+echo "tsdbDebugFlag 135" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "jnidebugFlag 135" >> $TAOS_CFG
echo "odbcdebugFlag 135" >> $TAOS_CFG
-echo "httpDebugFlag 143" >> $TAOS_CFG
+echo "httpDebugFlag 135" >> $TAOS_CFG
echo "monitorDebugFlag 131" >> $TAOS_CFG
echo "mqttDebugFlag 131" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
@@ -132,7 +133,7 @@ echo "monitorInterval 1" >> $TAOS_CFG
echo "http 0" >> $TAOS_CFG
echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG
-echo "numOfLogLines 100000000" >> $TAOS_CFG
+echo "numOfLogLines 10000000" >> $TAOS_CFG
echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
new file mode 100644
index 0000000000..e0b5e9b931
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
@@ -0,0 +1,272 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and added into cluster
+# step 2: create db(repl = 2), table, insert data,
+# step 4: stop dnode1, remove its mnode dir, and copy mnode dir of dnode2 to dnode1
+# step 5: restart dnode1, waiting sync end
+# step 6: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode1_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != offline then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+#sql show vgroups
+#print show vgroups:
+#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+
+print ============== step5: remove the mnode dir of dnode1, then copy the monde dir of dnode2
+system_content rm -rf ../../../sim/dnode1/data/mnode
+system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1, waiting sync end
+system sh/exec.sh -n dnode1 -s start
+sleep 1000
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+print ============== step7: stop dnode2
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode2_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
new file mode 100644
index 0000000000..ae7fc6af17
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
@@ -0,0 +1,274 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and added into cluster
+# step 2: create db(repl = 2), table, insert data,
+# step 4: stop dnode1, remove its mnode and vnode dir, and copy mnode and vnode dir of dnode2 to dnode1
+# step 5: restart dnode1, waiting sync end
+# step 6: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data (now-20d and now-40d), control data rows in order to keep them in cache, not falling to disk
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode1_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != offline then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+#sql show vgroups
+#print show vgroups:
+#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+
+print ============== step5: remove the mnode/vnode dirs of dnode1, then copy the mnode/vnode dirs of dnode2
+system_content rm -rf ../../../sim/dnode1/data/vnode
+system_content rm -rf ../../../sim/dnode1/data/mnode
+system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
+system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1, waiting sync end
+system sh/exec.sh -n dnode1 -s start
+sleep 1000
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+print ============== step7: stop dnode2
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode2_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
new file mode 100644
index 0000000000..dc9bc62696
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
@@ -0,0 +1,210 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and added into cluster
+# step 2: create db(repl = 2), table, insert data,
+# step 4: stop dnode1/dnode2, remove dnode1's mnode and vnode dir, and copy mnode and vnode dir of dnode2 to dnode1
+# step 5: restart dnode1/dnode2, waiting sync end
+# step 6: reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data (now-20d and now-40d), control data rows in order to keep them in cache, not falling to disk
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1/dnode2
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+print ============== step5: remove the mnode/vnode dirs of dnode1, then copy the mnode/vnode dirs of dnode2
+system_content rm -rf ../../../sim/dnode1/data/vnode
+system_content rm -rf ../../../sim/dnode1/data/mnode
+system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
+system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1/dnode2
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sql use $db
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
new file mode 100644
index 0000000000..b754dc7a49
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
@@ -0,0 +1,272 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and added into cluster
+# step 2: create db(repl = 2), table, insert data,
+# step 4: stop dnode1, remove its vnode dir, and copy vnode dir of dnode2 to dnode1
+# step 5: restart dnode1, waiting sync end
+# step 6: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data (now-20d and now-40d), control data rows in order to keep them in cache, not falling to disk
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode1_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != offline then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+#sql show vgroups
+#print show vgroups:
+#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+
+print ============== step5: remove the vnode dir of dnode1, then copy the vnode dir of dnode2
+system_content rm -rf ../../../sim/dnode1/data/vnode
+system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1, waiting sync end
+system sh/exec.sh -n dnode1 -s start
+sleep 1000
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+print ============== step7: stop dnode2
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode2_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/stress/.gitignore b/tests/stress/.gitignore
new file mode 100644
index 0000000000..25a8031c43
--- /dev/null
+++ b/tests/stress/.gitignore
@@ -0,0 +1,3 @@
+stress
+stress.exe
+cases.json
\ No newline at end of file
diff --git a/tests/stress/README.md b/tests/stress/README.md
new file mode 100644
index 0000000000..a7f8a2dac6
--- /dev/null
+++ b/tests/stress/README.md
@@ -0,0 +1,80 @@
+# STRESS
+
+Stress test tool for TDengine. It runs a set of test cases randomly and shows statistics.
+
+## COMMAND LINE
+
+``` bash
+$ ./stress [-h=<host>] [-P=<port>] [-d=<database>] [-u=<user>] [-p=<password>] [-c=<concurrency>] [-f=<fetch>] [-l=<log file>] [path_or_sql]
+```
+
+* **-h**: host name or IP address of TDengine server (default: localhost).
+* **-P**: port number of TDengine server (default: 0).
+* **-u**: user name (default: root).
+* **-p**: password (default: taosdata).
+* **-c**: concurrency, number of concurrent goroutines for query (default: 4).
+* **-f**: fetch data or not (default: true).
+* **-l**: log file path (default: no log).
+* **path_or_sql**: a SQL statement or path of a JSON file which contains the test cases (default: cases.json).
+
+## TEST CASE FILE
+
+```json
+[{
+ "weight": 1,
+ "sql": "select * from meters where ts>=now+%dm and ts<=now-%dm and c1=%v and c2=%d and c3='%s' and tbname='%s'",
+ "args": [{
+ "type": "range",
+ "min": 30,
+ "max": 60
+ }, {
+ "type": "bool"
+ }, {
+ "type": "int",
+ "min": -10,
+ "max": 20
+ }, {
+ "type": "string",
+ "min": 0,
+        "max": 10
+ }, {
+ "type": "list",
+ "list": [
+ "table1",
+ "table2",
+ "table3",
+ "table4"
+ ]
+ }]
+}]
+```
+
+The test case file is a standard JSON file which contains an array of test cases. For test cases, field `sql` is mandatory, and it can optionally include a `weight` field and an `args` field which is an array of arguments.
+
+`sql` is a SQL statement, it can include zero or more arguments (placeholders).
+
+`weight` defines the probability of the case being selected: the greater the value, the higher the possibility. It must be a non-negative integer and the default value is zero; however, if all cases have a zero weight, all the weights are regarded as 1.
+
+Placeholders of `sql` are replaced by arguments in `args` at runtime. There are 5 types of arguments currently:
+
+* **bool**: generate a `boolean` value randomly.
+* **int**: generate an `integer` between [`min`, `max`] randomly, the default value of `min` is 0 and `max` is 100.
+* **range**: generate two `integer`s between [`min`, `max`] randomly, the first is less than the second, the default value of `min` is 0 and `max` is 100.
+* **string**: generate a `string` with length between [`min`, `max`] randomly, the default value of `min` is 0 and `max` is 100.
+* **list**: select an item from `list` randomly.
+
+## OUTPUT
+
+```
+ 00:00:08 | TOTAL REQ | TOTAL TIME(us) | TOTAL AVG(us) | REQUEST | TIME(us) | AVERAGE(us) |
+ TOTAL | 3027 | 26183890 | 8650.11 | 287 | 3060935 | 10665.28 |
+ SUCCESS | 3027 | 26183890 | 8650.11 | 287 | 3060935 | 10665.28 |
+ FAIL | 0 | 0 | 0.00 | 0 | 0 | 0.00 |
+```
+
+* **Col 2**: total number of requests since test start.
+* **Col 3**: total time of all requests since test start.
+* **Col 4**: average time of all requests since test start.
+* **Col 5**: number of requests in the last second.
+* **Col 6**: time of all requests in the last second.
+* **Col 7**: average time of all requests in the last second.
diff --git a/tests/stress/go.mod b/tests/stress/go.mod
new file mode 100644
index 0000000000..df9b2806b5
--- /dev/null
+++ b/tests/stress/go.mod
@@ -0,0 +1,7 @@
+module github.com/taosdata/stress
+
+go 1.14
+
+require (
+ github.com/taosdata/driver-go v0.0.0-20200606095205-b786bac1857f
+)
diff --git a/tests/stress/main.go b/tests/stress/main.go
new file mode 100644
index 0000000000..c3b9290a37
--- /dev/null
+++ b/tests/stress/main.go
@@ -0,0 +1,406 @@
+package main
+
+import (
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ _ "github.com/taosdata/driver-go/taosSql"
+)
+
+type argument struct {
+ Type string `json:"type"`
+ Min int `json:"min"`
+ Max int `json:"max"`
+ List []interface{} `json:"list, omitempty"`
+}
+
// testCase is one SQL statement to run against the server, together with
// the generators for its placeholders and a relative selection weight.
type testCase struct {
	isQuery bool       `json:"-"` // true when SQL starts with "select"; set at load time (tag is redundant: unexported fields are never (un)marshaled)
	numArgs int        `json:"-"` // total number of values Args generate per execution; set at load time
	Weight  int        `json:"weight"` // relative selection probability; 0 means "use default"
	SQL     string     `json:"sql"`    // statement with fmt-style placeholders
	Args    []argument `json:"args"`   // generators for the placeholders, in order
}
+
+func (arg *argument) check() (int, error) {
+ if arg.Type == "list" {
+ if len(arg.List) == 0 {
+ return 0, errors.New("list cannot be empty")
+ }
+ return 1, nil
+ }
+
+ if arg.Max < arg.Min {
+ return 0, errors.New("invalid min/max value")
+ }
+
+ if arg.Type == "string" {
+ if arg.Min < 0 {
+ return 0, errors.New("negative string length")
+ }
+ }
+
+ if arg.Type == "int" && arg.Min == 0 && arg.Max == 0 {
+ arg.Max = arg.Min + 100
+ }
+
+ if arg.Type == "range" {
+ return 2, nil
+ }
+
+ return 1, nil
+}
+
+func (arg *argument) generate(args []interface{}) []interface{} {
+ const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+ switch arg.Type {
+ case "bool":
+ if rand.Intn(2) == 1 {
+ args = append(args, true)
+ } else {
+ args = append(args, false)
+ }
+
+ case "int":
+ v := rand.Intn(arg.Max-arg.Min+1) + arg.Min
+ args = append(args, v)
+
+ case "range":
+ v := rand.Intn(arg.Max-arg.Min) + arg.Min
+ args = append(args, v)
+ v = rand.Intn(arg.Max-v+1) + v
+ args = append(args, v)
+
+ case "string":
+ l := rand.Intn(arg.Max-arg.Min+1) + arg.Min
+ sb := strings.Builder{}
+ for i := 0; i < l; i++ {
+ sb.WriteByte(chars[rand.Intn(len(chars))])
+ }
+ args = append(args, sb.String())
+
+ case "list":
+ v := arg.List[rand.Intn(len(arg.List))]
+ args = append(args, v)
+ }
+
+ return args
+}
+
+func (tc *testCase) buildSql() string {
+ args := make([]interface{}, 0, tc.numArgs)
+ for i := 0; i < len(tc.Args); i++ {
+ args = tc.Args[i].generate(args)
+ }
+ return fmt.Sprintf(tc.SQL, args...)
+}
+
// statitics (sic) accumulates request counters. All fields are updated
// through sync/atomic by concurrent worker goroutines, so they must stay
// int64 and be accessed only with atomic loads/adds.
type statitics struct {
	succeeded         int64 // number of requests that completed without error
	failed            int64 // number of requests that returned an error
	succeededDuration int64 // cumulative duration of successful requests, in microseconds
	failedDuration    int64 // cumulative duration of failed requests, in microseconds
}
+
var (
	host     string // TDengine server host (-h)
	port     uint   // TDengine server port (-P)
	database string // database to use (-d)
	user     string // user name (-u)
	password string // password (-p)
	fetch    bool   // whether to fetch query result rows (-f)

	chLog       chan string    // failed-statement log channel; nil when logging is disabled
	wgLog       sync.WaitGroup // waits for the logger goroutine to drain chLog
	startAt     time.Time      // test start time, used by the statistics printer
	shouldStop  int64          // set to 1 (atomically) to ask worker goroutines to exit
	wgTest      sync.WaitGroup // waits for all worker goroutines to finish
	stat        statitics      // global request statistics, updated atomically
	totalWeight int            // sum of all test case weights
	cases       []testCase     // loaded test cases
)
+
+func loadTestCaseFromFile(file *os.File) error {
+ if e := json.NewDecoder(file).Decode(&cases); e != nil {
+ return e
+ }
+
+ if len(cases) == 0 {
+ return fmt.Errorf("no test case loaded.")
+ }
+
+ for i := 0; i < len(cases); i++ {
+ c := &cases[i]
+ c.SQL = strings.TrimSpace(c.SQL)
+ c.isQuery = strings.ToLower(c.SQL[:6]) == "select"
+ if c.Weight < 0 {
+ return fmt.Errorf("test %d: negative weight", i)
+ }
+ totalWeight += c.Weight
+
+ for j := 0; j < len(c.Args); j++ {
+ arg := &c.Args[j]
+ arg.Type = strings.ToLower(arg.Type)
+ n, e := arg.check()
+ if e != nil {
+ return fmt.Errorf("test case %d argument %d: %s", i, j, e.Error())
+ }
+ c.numArgs += n
+ }
+ }
+
+ if totalWeight == 0 {
+ for i := 0; i < len(cases); i++ {
+ cases[i].Weight = 1
+ }
+ totalWeight = len(cases)
+ }
+
+ return nil
+}
+
+func loadTestCase(pathOrSQL string) error {
+ if f, e := os.Open(pathOrSQL); e == nil {
+ defer f.Close()
+ return loadTestCaseFromFile(f)
+ }
+
+ pathOrSQL = strings.TrimSpace(pathOrSQL)
+ if strings.ToLower(pathOrSQL[:6]) != "select" {
+ return fmt.Errorf("'%s' is not a valid file or SQL statement", pathOrSQL)
+ }
+
+ cases = append(cases, testCase{
+ isQuery: true,
+ Weight: 1,
+ numArgs: 0,
+ SQL: pathOrSQL,
+ })
+ totalWeight = 1
+
+ return nil
+}
+
+func selectTestCase() *testCase {
+ sum, target := 0, rand.Intn(totalWeight)
+ var c *testCase
+ for i := 0; i < len(cases); i++ {
+ c = &cases[i]
+ sum += c.Weight
+ if sum > target {
+ break
+ }
+ }
+ return c
+}
+
+func runTest() {
+ defer wgTest.Done()
+ db, e := sql.Open("taosSql", fmt.Sprintf("%s:%s@tcp(%s:%v)/%s", user, password, host, port, database))
+ if e != nil {
+ fmt.Printf("failed to connect to database: %s\n", e.Error())
+ return
+ }
+ defer db.Close()
+
+ for atomic.LoadInt64(&shouldStop) == 0 {
+ c := selectTestCase()
+ str := c.buildSql()
+
+ start := time.Now()
+ if c.isQuery {
+ var rows *sql.Rows
+ if rows, e = db.Query(str); rows != nil {
+ if fetch {
+ for rows.Next() {
+ }
+ }
+ rows.Close()
+ }
+ } else {
+ _, e = db.Exec(str)
+ }
+ duration := time.Now().Sub(start).Microseconds()
+
+ if e != nil {
+ if chLog != nil {
+ chLog <- str + ": " + e.Error()
+ }
+ atomic.AddInt64(&stat.failed, 1)
+ atomic.AddInt64(&stat.failedDuration, duration)
+ } else {
+ atomic.AddInt64(&stat.succeeded, 1)
+ atomic.AddInt64(&stat.succeededDuration, duration)
+ }
+ }
+}
+
// getStatPrinter returns a closure that prints the 4-line statistics
// table: a header with the elapsed time, then TOTAL, SUCCESS and FAIL
// rows. The closure keeps a snapshot of the counters from its previous
// call so every row shows running totals alongside the figures for the
// interval since the last call.
func getStatPrinter() func(tm time.Time) {
	var last statitics // counter snapshot taken at the previous call
	lastPrintAt := startAt // NOTE(review): assigned below but never read — dead state, candidate for removal

	return func(tm time.Time) {
		var current statitics

		// Snapshot the shared counters; worker goroutines update them
		// concurrently via sync/atomic.
		current.succeeded = atomic.LoadInt64(&stat.succeeded)
		current.failed = atomic.LoadInt64(&stat.failed)
		current.succeededDuration = atomic.LoadInt64(&stat.succeededDuration)
		current.failedDuration = atomic.LoadInt64(&stat.failedDuration)

		// Header row: elapsed time as HH:MM:SS, reverse-video via ANSI escapes.
		seconds := int64(tm.Sub(startAt).Seconds())
		format := "\033[47;30m %02v:%02v:%02v | TOTAL REQ | TOTAL TIME(us) | TOTAL AVG(us) | REQUEST | TIME(us) | AVERAGE(us) |\033[0m\n"
		fmt.Printf(format, seconds/3600, seconds%3600/60, seconds%60)

		// TOTAL row. tr/td: total requests/duration since start;
		// r/d: requests/duration since the previous call; ta/a: averages.
		tr := current.succeeded + current.failed
		td := current.succeededDuration + current.failedDuration
		r := tr - last.succeeded - last.failed
		d := td - last.succeededDuration - last.failedDuration
		ta, a := 0.0, 0.0
		if tr > 0 {
			ta = float64(td) / float64(tr)
		}
		if r > 0 {
			a = float64(d) / float64(r)
		}
		format = " TOTAL | %9v | %14v | %13.2f | %7v | %10v | % 13.2f |\n"
		fmt.Printf(format, tr, td, ta, r, d, a)

		// SUCCESS row (green).
		tr = current.succeeded
		td = current.succeededDuration
		r = tr - last.succeeded
		d = td - last.succeededDuration
		ta, a = 0.0, 0.0
		if tr > 0 {
			ta = float64(td) / float64(tr)
		}
		if r > 0 {
			a = float64(d) / float64(r)
		}
		format = " SUCCESS | \033[32m%9v\033[0m | \033[32m%14v\033[0m | \033[32m%13.2f\033[0m | \033[32m%7v\033[0m | \033[32m%10v\033[0m | \033[32m%13.2f\033[0m |\n"
		fmt.Printf(format, tr, td, ta, r, d, a)

		// FAIL row (red).
		tr = current.failed
		td = current.failedDuration
		r = tr - last.failed
		d = td - last.failedDuration
		ta, a = 0.0, 0.0
		if tr > 0 {
			ta = float64(td) / float64(tr)
		}
		if r > 0 {
			a = float64(d) / float64(r)
		}
		format = " FAIL | \033[31m%9v\033[0m | \033[31m%14v\033[0m | \033[31m%13.2f\033[0m | \033[31m%7v\033[0m | \033[31m%10v\033[0m | \033[31m%13.2f\033[0m |\n"
		fmt.Printf(format, tr, td, ta, r, d, a)

		last = current
		lastPrintAt = tm
	}
}
+
+func startLogger(path string) error {
+ if len(path) == 0 {
+ return nil
+ }
+
+ f, e := os.Create(path)
+ if e != nil {
+ return e
+ }
+
+ chLog = make(chan string, 100)
+ wgLog.Add(1)
+ go func() {
+ for s := range chLog {
+ if f != nil {
+ f.WriteString(s)
+ f.WriteString("\n")
+ }
+ }
+ f.Close()
+ wgLog.Done()
+ }()
+
+ return nil
+}
+
// main parses the command line, loads the test cases, starts the worker
// goroutines, and redraws the statistics table in place once per second
// (using ANSI cursor-movement escapes) until the user presses Ctrl+C.
func main() {
	var concurrency uint
	var logPath string
	flag.StringVar(&host, "h", "localhost", "host name or IP address of TDengine server")
	flag.UintVar(&port, "P", 0, "port (default 0)")
	flag.StringVar(&database, "d", "test", "database name")
	flag.StringVar(&user, "u", "root", "user name")
	flag.StringVar(&password, "p", "taosdata", "password")
	flag.BoolVar(&fetch, "f", true, "fetch result or not")
	flag.UintVar(&concurrency, "c", 4, "concurrency, number of goroutines for query")
	flag.StringVar(&logPath, "l", "", "path of log file (default: no log)")
	flag.Parse()

	if e := startLogger(logPath); e != nil {
		fmt.Println("failed to open log file:", e.Error())
		return
	}

	// The sole positional argument is either a JSON case file or a single
	// SELECT statement; it defaults to "cases.json".
	pathOrSQL := flag.Arg(0)
	if len(pathOrSQL) == 0 {
		pathOrSQL = "cases.json"
	}
	if e := loadTestCase(pathOrSQL); e != nil {
		fmt.Println("failed to load test cases:", e.Error())
		return
	}

	rand.Seed(time.Now().UnixNano())

	fmt.Printf("\nSERVER: %s DATABASE: %s CONCURRENCY: %d FETCH DATA: %v\n\n", host, database, concurrency, fetch)

	startAt = time.Now()
	printStat := getStatPrinter()
	// Draw the (all-zero) table once so the later cursor-up redraws line up.
	printStat(startAt)

	for i := uint(0); i < concurrency; i++ {
		wgTest.Add(1)
		go runTest()
	}

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	ticker := time.NewTicker(time.Second)

	// "\033[1A" moves the cursor up one line, back onto the table.
	fmt.Println("Ctrl + C to exit....\033[1A")

LOOP:
	for {
		select {
		case <-interrupt:
			break LOOP
		case tm := <-ticker.C:
			// "\033[4A" moves the cursor up 4 lines so the table is
			// overwritten in place every second.
			fmt.Print("\033[4A")
			printStat(tm)
		}
	}

	atomic.StoreInt64(&shouldStop, 1)
	// "\033[100D" moves the cursor to the start of the line.
	fmt.Print("\033[100D'Ctrl + C' received, Waiting started query to stop...")
	wgTest.Wait()

	// Closing chLog makes the logger goroutine flush and exit; wgLog
	// guarantees the file is fully written before we print final stats.
	if chLog != nil {
		close(chLog)
		wgLog.Wait()
	}
	fmt.Print("\033[4A\033[100D")
	printStat(time.Now()) // final table with the post-shutdown counters
	fmt.Println()
}