diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go index d714fc339c..6996047026 100644 --- a/importSampleData/app/main.go +++ b/importSampleData/app/main.go @@ -18,7 +18,7 @@ import ( "sync" "time" - _ "github.com/taosdata/TDengine/src/connector/go/taosSql" + _ "github.com/taosdata/driver-go/taosSql" ) const ( diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h index 99ffa4e766..67942ad42a 100644 --- a/src/client/inc/tschemautil.h +++ b/src/client/inc/tschemautil.h @@ -110,8 +110,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size //todo tags value as well as the table id structure needs refactor char *tsGetTagsValue(STableMeta *pMeta); -void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable); - #ifdef __cplusplus } #endif diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c8754e5beb..a398ad659e 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -320,6 +320,8 @@ typedef struct SSqlStream { SSqlObj *pSql; uint32_t streamId; char listed; + bool isProject; + int16_t precision; int64_t num; // number of computing count /* @@ -334,7 +336,6 @@ typedef struct SSqlStream { int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed int64_t interval; int64_t slidingTime; - int16_t precision; void * pTimer; void (*fp)(); diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index afdad05b43..0917a3b19c 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -14,7 +14,6 @@ */ #include "os.h" -#include "qast.h" #include "qextbuffer.h" #include "qfill.h" #include "qhistogram.h" @@ -23,6 +22,7 @@ #include "qtsbuf.h" #include "taosdef.h" #include "taosmsg.h" +#include "qast.h" #include "tscLog.h" #include "tscSubquery.h" #include "tscompression.h" diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 1d66fb0467..83700ce0a5 100644 
--- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -406,7 +406,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { pSql->res.qhandle = 0x1; pSql->res.numOfRows = 0; } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { - taosCacheEmpty(tscCacheHandle,false); + taosCacheEmpty(tscCacheHandle); } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { tscProcessServerVer(pSql); } else if (pCmd->command == TSDB_SQL_CLI_VERSION) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index ea88e4a935..65e2c976e0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -18,19 +18,19 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "qast.h" #include "taos.h" #include "taosmsg.h" -#include "tstoken.h" -#include "tstrbuild.h" -#include "ttime.h" +#include "qast.h" +#include "tcompare.h" +#include "tname.h" #include "tscLog.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" +#include "tstoken.h" +#include "tstrbuild.h" +#include "ttime.h" #include "ttokendef.h" -#include "tname.h" -#include "tcompare.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -90,6 +90,7 @@ static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryI static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type); +static int32_t validateEp(char* ep); static int32_t validateDNodeConfig(tDCLSQL* pOptions); static int32_t validateLocalConfig(tDCLSQL* pOptions); static int32_t validateColumnName(char* name); @@ -359,6 +360,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_CFG_DNODE: { const char* msg2 = "invalid configure options or values"; + const char* msg3 = "invalid dnode ep"; /* validate the ip address */ tDCLSQL* pDCL = 
pInfo->pDCLInfo; @@ -375,6 +377,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n); + if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n); if (pDCL->nTokens == 3) { @@ -654,11 +660,14 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg0 = "sliding value too small"; const char* msg1 = "sliding value no larger than the interval value"; + const char* msg2 = "sliding value can not less than 1% of interval value"; + + const static int32_t INTERVAL_SLIDING_FACTOR = 100; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SSQLToken* pSliding = &pQuerySql->sliding; STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); + SSQLToken* pSliding = &pQuerySql->sliding; if (pSliding->n != 0) { getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime); if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) { @@ -676,6 +685,10 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu pQueryInfo->slidingTime = pQueryInfo->intervalTime; } + if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + return TSDB_CODE_SUCCESS; } @@ -4629,6 +4642,24 @@ typedef struct SDNodeDynConfOption { int32_t len; // name string length } SDNodeDynConfOption; + +int32_t validateEp(char* ep) { + char buf[TSDB_EP_LEN + 1] = {0}; + tstrncpy(buf, ep, TSDB_EP_LEN); + + char *pos = strchr(buf, ':'); + if (NULL == pos) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + uint16_t port = atoi(pos+1); + if (0 == port) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + int32_t 
validateDNodeConfig(tDCLSQL* pOptions) { if (pOptions->nTokens < 2 || pOptions->nTokens > 3) { return TSDB_CODE_TSC_INVALID_SQL; @@ -6096,16 +6127,12 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS } } - + // NOTE: binary|nchar data allows the >|< type filter if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) { if (pRight->nodeType == TSQL_NODE_VALUE) { if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL) { return TSDB_CODE_TSC_INVALID_SQL; } - if ((pRight->pVal->nType == TSDB_DATA_TYPE_BINARY || pRight->pVal->nType == TSDB_DATA_TYPE_NCHAR) - && (*pExpr)->_node.optr != TSDB_RELATION_LIKE) { - return TSDB_CODE_TSC_INVALID_SQL; - } } } } diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index da06e3e5e2..934a562387 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -215,25 +215,3 @@ __attribute__ ((unused)) static FORCE_INLINE size_t copy(char* dst, const char* return len; } -/* - * tablePrefix.columnName - * extract table name and save it in pTable, with only column name in pToken - */ -void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) { - const char sep = TS_PATH_DELIMITER[0]; - - if (pToken == pTable || pToken == NULL || pTable == NULL) { - return; - } - - char* r = strnchr(pToken->z, sep, pToken->n, false); - - if (r != NULL) { // record the table name token - pTable->n = r - pToken->z; - pTable->z = pToken->z; - - r += 1; - pToken->n -= (r - pToken->z); - pToken->z = r; - } -} diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 584e70ba6c..88fcc3828e 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -438,8 +438,9 @@ void tscKillSTableQuery(SSqlObj *pSql) { * here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause * sub-queries not correctly released and master sql object of super table query reaches an abnormal state. 
*/ - pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; - rpcCancelRequest(pSql->pSubs[i]->pRpcCtx); + rpcCancelRequest(pSub->pRpcCtx); + pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; + tscQueueAsyncRes(pSub); } /* @@ -1955,7 +1956,7 @@ int tscProcessUseDbRsp(SSqlObj *pSql) { } int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { - taosCacheEmpty(tscCacheHandle, false); + taosCacheEmpty(tscCacheHandle); return 0; } @@ -2001,7 +2002,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { if (isSuperTable) { // if it is a super table, reset whole query cache tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name); - taosCacheEmpty(tscCacheHandle, false); + taosCacheEmpty(tscCacheHandle); } } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8fbc1f0109..f9f93b3f89 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -617,19 +617,18 @@ void taos_stop_query(TAOS_RES *res) { if (pSql->signature != pSql) return; tscDebug("%p start to cancel query", res); - pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { tscKillSTableQuery(pSql); - return; } - if (pSql->cmd.command >= TSDB_SQL_LOCAL) { - return; + if (pSql->cmd.command < TSDB_SQL_LOCAL) { + rpcCancelRequest(pSql->pRpcCtx); } + pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; + tscQueueAsyncRes(pSql); - rpcCancelRequest(pSql->pRpcCtx); tscDebug("%p query is cancelled", res); } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index cc703e0f93..f214e76cc7 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -71,6 +71,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { pSql->fp = tscProcessStreamQueryCallback; pSql->param = pStream; + pSql->res.completed = false; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 
0); @@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { // failed to get meter/metric meta, retry in 10sec. if (code != TSDB_CODE_SUCCESS) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); + tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); tscSetRetryTimer(pStream, pSql, retryDelayTime); } else { @@ -108,7 +109,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); tscDebug("%p add into timer", pSql); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { /* * pQueryInfo->window.ekey, which is the start time, does not change in case of * repeat first execution, once the first execution failed. @@ -121,7 +122,19 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { } } else { pQueryInfo->window.skey = pStream->stime - pStream->interval; - pQueryInfo->window.ekey = pStream->stime - 1; + int64_t etime = taosGetTimestamp(pStream->precision); + // delay to wait all data in last time window + if (pStream->precision == TSDB_TIME_PRECISION_MICRO) { + etime -= tsMaxStreamComputDelay * 1000l; + } else { + etime -= tsMaxStreamComputDelay; + } + if (etime > pStream->etime) { + etime = pStream->etime; + } else { + etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval; + } + pQueryInfo->window.ekey = etime; } // launch stream computing in a new thread @@ -137,7 +150,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf SSqlStream *pStream = (SSqlStream *)param; if (tres == NULL || numOfRows < 0) { int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, 
pStream, numOfRows, + tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, retryDelay); STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0); @@ -151,17 +164,45 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param); } -static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) { - SSqlRes *pRes = &pSql->res; - - int64_t timestamp = *(int64_t *)pRes->data; - int64_t actualTimestamp = pStream->stime - pStream->interval; - - if (timestamp != actualTimestamp) { - // reset the timestamp of each agg point by using start time of each interval - *((int64_t *)pRes->data) = actualTimestamp; - tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64, pSql, pStream, timestamp, actualTimestamp); +// no need to be called as this is alreay done in the query +static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) { +#if 0 + SSqlObj * pSql = pStream->pSql; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (pQueryInfo->fillType != TSDB_FILL_SET_VALUE && pQueryInfo->fillType != TSDB_FILL_NULL) { + return; } + + SSqlRes *pRes = &pSql->res; + /* failed to retrieve any result in this retrieve */ + pSql->res.numOfRows = 1; + void *row[TSDB_MAX_COLUMNS] = {0}; + char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0}; + void *oldPtr = pSql->res.data; + pSql->res.data = tmpRes; + int32_t rowNum = 0; + + while (pStream->stime + pStream->slidingTime < ts) { + pStream->stime += pStream->slidingTime; + *(TSKEY*)row[0] = pStream->stime; + for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); + TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type); + row[i] = pSql->res.data + 
offset; + } + (*pStream->fp)(pStream->param, pSql, row); + ++rowNum; + } + + if (rowNum > 0) { + tscDebug("%p stream:%p %d rows padded", pSql, pStream, rowNum); + } + + pRes->numOfRows = 0; + pRes->data = oldPtr; +#endif } static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) { @@ -170,7 +211,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); + tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); return; @@ -180,16 +221,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful. 
pStream->numOfRes += numOfRows; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - for(int32_t i = 0; i < numOfRows; ++i) { TAOS_ROW row = taos_fetch_row(res); tscDebug("%p stream:%p fetch result", pSql, pStream); - if (isProjectStream(pQueryInfo)) { - pStream->stime = *(TSKEY *)row[0]; - } else { - tscSetTimestampForRes(pStream, pSql); - } + tscStreamFillTimeGap(pStream, *(TSKEY*)row[0]); + pStream->stime = *(TSKEY *)row[0]; // user callback function (*pStream->fp)(pStream->param, res, row); @@ -199,55 +235,18 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream); } else { // numOfRows == 0, all data has been retrieved pStream->useconds += pSql->res.useconds; - - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (pStream->numOfRes == 0) { - if (pQueryInfo->fillType == TSDB_FILL_SET_VALUE || pQueryInfo->fillType == TSDB_FILL_NULL) { - SSqlRes *pRes = &pSql->res; - - /* failed to retrieve any result in this retrieve */ - pSql->res.numOfRows = 1; - void *row[TSDB_MAX_COLUMNS] = {0}; - char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0}; - - void *oldPtr = pSql->res.data; - pSql->res.data = tmpRes; - - for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - - assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type); - row[i] = pSql->res.data + offset; - } - - tscSetTimestampForRes(pStream, pSql); - row[0] = pRes->data; - - // char result[512] = {0}; - // taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutput); - // tscInfo("%p stream:%p query result: %s", pSql, pStream, result); - tscDebug("%p stream:%p fetch result", pSql, pStream); - - // user callback function - (*pStream->fp)(pStream->param, res, row); - - pRes->numOfRows = 0; 
- pRes->data = oldPtr; - } else if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { /* no resuls in the query range, retry */ // todo set retry dynamic time int32_t retry = tsProjectExecInterval; - tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry); + tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry); tscSetRetryTimer(pStream, pStream->pSql, retry); return; } - } else { - if (isProjectStream(pQueryInfo)) { - pStream->stime += 1; - } + } else if (pStream->isProject) { + pStream->stime += 1; } tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name, @@ -262,10 +261,9 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf } static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { int64_t now = taosGetTimestamp(pStream->precision); int64_t etime = now > pStream->etime ? 
pStream->etime : now; @@ -323,8 +321,7 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) { static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { int64_t timer = 0; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { /* * for project query, no mater fetch data successfully or not, next launch will issue * more than the sliding time window @@ -342,7 +339,6 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { return; } } else { - pStream->stime += pStream->slidingTime; if ((pStream->stime - pStream->interval) >= pStream->etime) { tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); @@ -409,14 +405,16 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { pStream->slidingTime = pQueryInfo->slidingTime; - pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor - pQueryInfo->slidingTime = 0; + if (pStream->isProject) { + pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor + pQueryInfo->slidingTime = 0; + } } static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { // no data in table, flush all data till now to destination meter, 10sec delay pStream->interval = tsProjectExecInterval; pStream->slidingTime = tsProjectExecInterval; @@ -489,7 +487,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream)); if (pStream == NULL) { - tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code); + tscError("%p open 
stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code); tscFreeSqlObj(pSql); return NULL; } @@ -514,7 +512,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p if (pRes->code != TSDB_CODE_SUCCESS) { setErrorInfo(pSql, pRes->code, pCmd->payload); - tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code); + tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code); tscFreeSqlObj(pSql); return NULL; } @@ -523,6 +521,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); + pStream->isProject = isProjectStream(pQueryInfo); pStream->fp = fp; pStream->callback = callback; pStream->param = param; @@ -565,6 +564,8 @@ void taos_close_stream(TAOS_STREAM *handle) { taosTmrStopA(&(pStream->pTimer)); tscDebug("%p stream:%p is closed", pSql, pStream); + // notify CQ to release the pStream object + pStream->fp(pStream->param, NULL, NULL); tscFreeSqlObj(pSql); pStream->pSql = NULL; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 0243d115f0..b5b659de0c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -14,12 +14,12 @@ */ #include "os.h" -#include "tscSubquery.h" +#include "qtsbuf.h" #include "qast.h" #include "tcompare.h" -#include "tschemautil.h" -#include "qtsbuf.h" #include "tscLog.h" +#include "tscSubquery.h" +#include "tschemautil.h" #include "tsclient.h" typedef struct SInsertSupporter { @@ -57,10 +57,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ pSubQueryInfo1->tsBuf = output1; pSubQueryInfo2->tsBuf = output2; + // no result generated, return directly + if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) { + tscDebug("%p at least one ts-comp is 
empty, 0 for secondary query after ts blocks intersecting", pSql); + return 0; + } + tsBufResetPos(pSupporter1->pTSBuf); tsBufResetPos(pSupporter2->pTSBuf); - // TODO add more details information if (!tsBufNextPos(pSupporter1->pTSBuf)) { tsBufFlush(output1); tsBufFlush(output2); @@ -210,6 +215,7 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { pSupporter->f = NULL; } + tfree(pSupporter->pIdTagList); tscTagCondRelease(&pSupporter->tagCond); free(pSupporter); } @@ -420,43 +426,6 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) { pQueryInfo->window = *win; } -static UNUSED_FUNC void tSIntersectionAndLaunchSecQuery(SJoinSupporter* pSupporter, SSqlObj* pSql) { - SSqlObj* pParentSql = pSupporter->pObj; - SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); - -// if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) { -// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); -// assert(pQueryInfo->numOfTables == 1); -// -// // for projection query, need to try next vnode -//// int32_t totalVnode = pTableMetaInfo->pMetricMeta->numOfVnodes; -// int32_t totalVnode = 0; -// if ((++pTableMetaInfo->vgroupIndex) < totalVnode) { -// tscDebug("%p current vnode:%d exhausted, try next:%d. total vnode:%d. 
current numOfRes:%d", pSql, -// pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVnode, pRes->numOfTotal); -// -// pSql->cmd.command = TSDB_SQL_SELECT; -// pSql->fp = tscJoinQueryCallback; -// tscProcessSql(pSql); -// -// return; -// } -// } - - SJoinSupporter* p1 = pParentSql->pSubs[0]->param; - SJoinSupporter* p2 = pParentSql->pSubs[1]->param; - - STimeWindow win = TSWINDOW_INITIALIZER; - int64_t num = doTSBlockIntersect(pParentSql, p1, p2, &win); - if (num <= 0) { // no result during ts intersect - tscDebug("%p free all sub SqlObj and quit", pParentSql); - freeJoinSubqueryObj(pParentSql); - } else { - updateQueryTimeRange(pParentQueryInfo, &win); - tscLaunchRealSubqueries(pParentSql); - } -} - int32_t tscCompareTidTags(const void* p1, const void* p2) { const STidTags* t1 = (const STidTags*) varDataVal(p1); const STidTags* t2 = (const STidTags*) varDataVal(p2); @@ -713,9 +682,12 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow SArray *s1 = NULL, *s2 = NULL; getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2); if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return. 
- tscDebug("%p free all sub SqlObj and quit", pParentSql); + tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); freeJoinSubqueryObj(pParentSql); + // set no result command + pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + (*pParentSql->fp)(pParentSql->param, pParentSql, 0); } else { // proceed to for ts_comp query SSqlCmd* pSubCmd1 = &pParentSql->pSubs[0]->cmd; @@ -846,7 +818,10 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow if (num <= 0) { // no result during ts intersect tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql); freeJoinSubqueryObj(pParentSql); - + + // set no result command + pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + (*pParentSql->fp)(pParentSql->param, pParentSql, 0); return; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 791073f7a8..ed9822eea9 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -14,21 +14,21 @@ */ #include "os.h" -#include "qast.h" +#include "hash.h" +#include "tscUtil.h" #include "taosmsg.h" +#include "qast.h" #include "tcache.h" #include "tkey.h" #include "tmd5.h" -#include "tscProfile.h" #include "tscLocalMerge.h" +#include "tscLog.h" +#include "tscProfile.h" #include "tscSubquery.h" #include "tschemautil.h" #include "tsclient.h" #include "ttimer.h" #include "ttokendef.h" -#include "tscLog.h" -#include "tscUtil.h" -#include "hash.h" static void freeQueryInfoImpl(SQueryInfo* pQueryInfo); static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache); diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 25f78cde7e..2263a5dae1 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -3,6 +3,7 @@ #include "os.h" #include "taosmsg.h" +#include "tstoken.h" typedef struct SDataStatis { int16_t colId; @@ -23,6 +24,8 @@ void extractTableName(const char 
*tableId, char *name); char* extractDBName(const char *tableId, char *name); +void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable); + SSchema tGetTableNameColumnSchema(); bool tscValidateTableNameLength(size_t len); diff --git a/src/query/inc/tvariant.h b/src/common/inc/tvariant.h similarity index 100% rename from src/query/inc/tvariant.h rename to src/common/inc/tvariant.h diff --git a/src/common/src/tname.c b/src/common/src/tname.c index b3ff15f936..8b85ecfbc7 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -105,3 +105,26 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in } return start; } + +/* + * tablePrefix.columnName + * extract table name and save it in pTable, with only column name in pToken + */ +void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) { + const char sep = TS_PATH_DELIMITER[0]; + + if (pToken == pTable || pToken == NULL || pTable == NULL) { + return; + } + + char* r = strnchr(pToken->z, sep, pToken->n, false); + + if (r != NULL) { // record the table name token + pTable->n = r - pToken->z; + pTable->z = pToken->z; + + r += 1; + pToken->n -= (r - pToken->z); + pToken->z = r; + } +} diff --git a/src/query/src/tvariant.c b/src/common/src/tvariant.c similarity index 100% rename from src/query/src/tvariant.c rename to src/common/src/tvariant.c diff --git a/src/connector/jdbc/buildTDengine.sh b/src/connector/jdbc/buildTDengine.sh deleted file mode 100755 index cf98215c85..0000000000 --- a/src/connector/jdbc/buildTDengine.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -ulimit -c unlimited - -function buildTDengine { - cd /root/TDengine - - git remote update - REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` - LOCAL_COMMIT=`git rev-parse --short @` - - echo " LOCAL: $LOCAL_COMMIT" - echo "REMOTE: $REMOTE_COMMIT" - if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then - echo "repo up-to-date" - else - echo "repo need to pull" - git pull - - LOCAL_COMMIT=`git 
rev-parse --short @` - cd /root/TDengine/debug - rm -rf /root/TDengine/debug/* - cmake .. - make > /dev/null - make install - fi -} - -function restartTaosd { - systemctl stop taosd - pkill -KILL -x taosd - sleep 10 - - logDir=`grep 'logDir' /etc/taos/taos.cfg|awk 'END{print $2}'` - dataDir=`grep 'dataDir' /etc/taos/taos.cfg|awk '{print $2}'` - - rm -rf $logDir/* - rm -rf $dataDir/* - - taosd 2>&1 > /dev/null & - sleep 10 -} - -buildTDengine -restartTaosd diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java index 12d026c0b3..7145673c71 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java @@ -9,8 +9,7 @@ public class TDNode { private int index; private int running; private int deployed; - private boolean testCluster; - private int valgrind; + private boolean testCluster; private String path; private String cfgDir; private String dataDir; @@ -21,18 +20,13 @@ public class TDNode { this.index = index; running = 0; deployed = 0; - testCluster = false; - valgrind = 0; + testCluster = false; } public void setPath(String path) { this.path = path; } - public void setValgrind(int valgrind) { - this.valgrind = valgrind; - } - public void setTestCluster(boolean testCluster) { this.testCluster = testCluster; } @@ -58,7 +52,7 @@ public class TDNode { public void start() { String selfPath = System.getProperty("user.dir"); String binPath = ""; - String projDir = selfPath + "../../../../"; + String projDir = selfPath + "/../../../../"; try { ArrayList taosdPath = new ArrayList<>(); @@ -95,15 +89,9 @@ public class TDNode { return; } - String cmd = ""; - if(this.valgrind == 0) { - cmd = "nohup " + binPath + " -c " + cfgDir + " > /dev/null 2>&1 & "; - System.out.println("start taosd cmd: " + cmd); - } else { - String valgrindCmdline = "valgrind --tool=memcheck --leak-check=full 
--show-reac∏hable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"; - cmd = "nohup " + valgrindCmdline + " " + binPath + " -c " + this.cfgDir + " 2>&1 & "; - } - + String cmd = "nohup " + binPath + " -c " + cfgDir + " > /dev/null 2>&1 & "; + System.out.println("start taosd cmd: " + cmd); + try{ Runtime.getRuntime().exec(cmd); TimeUnit.SECONDS.sleep(5); @@ -115,12 +103,7 @@ public class TDNode { } public void stop() { - String toBeKilled = ""; - if (this.valgrind == 0) { - toBeKilled = "taosd"; - } else { - toBeKilled = "valgrind.bin"; - } + String toBeKilled = "taosd"; if (this.running != 0) { String psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print " + toBeKilled + "}'"; @@ -136,10 +119,6 @@ public class TDNode { String fuserCmd = "fuser -k -n tcp " + port; Runtime.getRuntime().exec(fuserCmd).waitFor(); } - - if (this.valgrind == 1) { - TimeUnit.SECONDS.sleep(2); - } } catch (Exception e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java index 2aa33a1840..948cda95cf 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java @@ -5,8 +5,7 @@ import java.util.*; public class TDNodes { private ArrayList tdNodes; - private boolean testCluster; - private int valgrind; + private boolean testCluster; public TDNodes () { tdNodes = new ArrayList<>(); @@ -21,13 +20,7 @@ public class TDNodes { Process ps = Runtime.getRuntime().exec(psCmd); ps.waitFor(); String killCmd = "kill -9 " + ps.pid(); - Runtime.getRuntime().exec(killCmd).waitFor(); - - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"; - ps = Runtime.getRuntime().exec(psCmd); - ps.waitFor(); - killCmd = "kill -9 " + ps.pid(); - Runtime.getRuntime().exec(killCmd).waitFor(); + Runtime.getRuntime().exec(killCmd).waitFor(); String binPath = 
System.getProperty("user.dir"); binPath += "/../../../debug"; @@ -54,10 +47,6 @@ public class TDNodes { this.testCluster = testCluster; } - public void setValgrid(int valgrind) { - this.valgrind = valgrind; - } - public void check(int index) { if(index < 1 || index > 10) { System.out.println("index: " + index + " should on a scale of [1, 10]"); @@ -70,8 +59,7 @@ public class TDNodes { File file = new File(System.getProperty("user.dir") + "/../../../"); String projectRealPath = file.getCanonicalPath(); check(index); - tdNodes.get(index - 1).setTestCluster(this.testCluster); - tdNodes.get(index - 1).setValgrind(valgrind); + tdNodes.get(index - 1).setTestCluster(this.testCluster); tdNodes.get(index - 1).setPath(projectRealPath); tdNodes.get(index - 1).deploy(); } catch (Exception e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index 8156818c1b..5f105fb782 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -9,20 +9,18 @@ import org.junit.BeforeClass; public class BaseTest { private static boolean testCluster = false; - private static String deployPath = System.getProperty("user.dir"); - private static int valgrind = 0; + private static String deployPath = System.getProperty("user.dir"); private static TDNodes tdNodes = new TDNodes(); @BeforeClass - public static void setUpEvn() { + public static void setupEnv() { try{ File file = new File(deployPath + "/../../../"); String rootPath = file.getCanonicalPath(); tdNodes.setPath(rootPath); - tdNodes.setTestCluster(testCluster); - tdNodes.setValgrid(valgrind); + tdNodes.setTestCluster(testCluster); tdNodes.deploy(1); tdNodes.start(1); diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py index 06d6a19462..3f0f315d33 100644 --- 
a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/linux/python3/taos/cursor.py @@ -1,6 +1,7 @@ from .cinterface import CTaosInterface from .error import * from .constants import FieldType +import threading # querySeqNum = 0 @@ -37,6 +38,7 @@ class TDengineCursor(object): self._block_iter = 0 self._affected_rows = 0 self._logfile = "" + self._threadId = threading.get_ident() if connection is not None: self._connection = connection @@ -103,6 +105,12 @@ class TDengineCursor(object): def execute(self, operation, params=None): """Prepare and execute a database operation (query or command). """ + # if threading.get_ident() != self._threadId: + # info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + if not operation: return None @@ -188,6 +196,11 @@ class TDengineCursor(object): def fetchall(self): """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """ + # if threading.get_ident() != self._threadId: + # info ="[WARNING] Cursor fetchall:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetchall") @@ -232,6 +245,12 @@ class TDengineCursor(object): def _handle_result(self): """Handle the return result from query. 
""" + # if threading.get_ident() != self._threadId: + # info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + self._description = [] for ele in self._fields: self._description.append( diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index a633968616..04d3a6fd6d 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -109,6 +109,8 @@ void cqClose(void *handle) { while (pObj) { SCqObj *pTemp = pObj; pObj = pObj->next; + tdFreeSchema(pTemp->pSchema); + tfree(pTemp->sqlStr); free(pTemp); } @@ -242,6 +244,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { SCqObj *pObj = (SCqObj *)param; + if (tres == NULL && row == NULL) { + pObj->pStream = NULL; + return; + } SCqContext *pContext = pObj->pContext; STSchema *pSchema = pObj->pSchema; if (pObj->pStream == NULL) return; @@ -263,8 +269,14 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { void* val = row[i]; if (val == NULL) { val = getNullValue(c->type); - } else if (IS_VAR_DATA_TYPE(c->type)) { + } else if (c->type == TSDB_DATA_TYPE_BINARY) { val = ((char*)val) - sizeof(VarDataLenT); + } else if (c->type == TSDB_DATA_TYPE_NCHAR) { + char buf[TSDB_MAX_NCHAR_LEN]; + size_t len = taos_fetch_lengths(tres)[i]; + taosMbsToUcs4(val, len, buf, sizeof(buf), &len); + memcpy(val + sizeof(VarDataLenT), buf, len); + varDataLen(val) = len; } tdAppendColVal(trow, val, c->type, c->bytes, c->offset); } diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index 90d857155a..b53c66e00c 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -131,8 +131,8 @@ static void dnodeFreeMnodeWriteMsg(SMnodeMsg *pWrite) { taosFreeQitem(pWrite); } -void dnodeSendRpcMnodeWriteRsp(void *pRaw, int32_t code) { - SMnodeMsg *pWrite = pRaw; +void 
dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) { + SMnodeMsg *pWrite = pMsg; if (pWrite == NULL) return; if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return; if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) { diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 9c85d4341b..4f489d2af2 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -402,6 +402,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { void *pVnode = vnodeAcquireVnode(pCreate->cfg.vgId); if (pVnode != NULL) { + dDebug("vgId:%d, already exist, processed as alter msg", pCreate->cfg.vgId); int32_t code = vnodeAlter(pVnode, pCreate); vnodeRelease(pVnode); return code; diff --git a/src/inc/query.h b/src/inc/query.h index 88badc2d7b..c648270b21 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -87,8 +87,8 @@ int32_t qKillQuery(qinfo_t qinfo); void* qOpenQueryMgmt(int32_t vgId); void qSetQueryMgmtClosed(void* pExecutor); void qCleanupQueryMgmt(void* pExecutor); -void** qRegisterQInfo(void* pMgmt, void* qInfo); -void** qAcquireQInfo(void* pMgmt, void** key); +void** qRegisterQInfo(void* pMgmt, uint64_t qInfo); +void** qAcquireQInfo(void* pMgmt, uint64_t key); void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree); #ifdef __cplusplus diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index fc433463c5..d2bef9ea57 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -129,9 +129,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "mnode clus TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "mnode accounts already exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "mnode invalid account") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_PARA, 0, 0x0342, "mnode invalid account parameter") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0343, "mnode invalid acct option") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_ACCTS, 0, 0x0344, "mnode too many accounts") 
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0342, "mnode invalid acct option") TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_ALREADY_EXIST, 0, 0x0350, "mnode user already exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER, 0, 0x0351, "mnode invalid user") @@ -145,7 +143,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, 0, 0x0361, "mnode inva TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, 0, 0x0362, "mnode invalid table name") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, 0, 0x0363, "mnode invalid table type") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, 0, 0x0364, "mnode too many tags") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TABLES, 0, 0x0365, "mnode too many tables") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, 0, 0x0366, "mnode not enough time series") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, 0, 0x0367, "mnode no super table") // operation only available for super table TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, 0, 0x0368, "mnode column name too long") @@ -161,13 +158,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION, 0, 0x0382, "mnode inva TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, 0, 0x0383, "mnode invalid database") TAOS_DEFINE_ERROR(TSDB_CODE_MND_MONITOR_DB_FORBIDDEN, 0, 0x0384, "mnode monitor db forbidden") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, 0, 0x0385, "mnode too many databases") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, 0, 0x0386, "mnode db in dropping") // dnode TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "dnode message not processed") TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "dnode out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "dnode no disk write access") TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "dnode invalid message length") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_FILE_FORMAT, 0, 0x0404, "dnode invalid file format") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, 
"vnode action in progress") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 8c987b6ef1..ac89d1dabb 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -473,7 +473,7 @@ typedef struct { typedef struct { int32_t code; - uint64_t qhandle; + uint64_t qhandle; // query handle } SQueryTableRsp; typedef struct { @@ -486,7 +486,7 @@ typedef struct SRetrieveTableRsp { int32_t numOfRows; int8_t completed; // all results are returned to client int16_t precision; - int64_t offset; // updated offset value for multi-vnode projection query + int64_t offset; // updated offset value for multi-vnode projection query int64_t useconds; char data[]; } SRetrieveTableRsp; diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index a599f100f9..b8cc1768e8 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -235,9 +235,10 @@ bool tsdbNextDataBlock(TsdbQueryHandleT *pQueryHandle); * Get current data block information * * @param pQueryHandle + * @param pBlockInfo * @return */ -SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle); +void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle, SDataBlockInfo* pBlockInfo); /** * diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index ba123ac2d4..a440db7301 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -206,9 +206,10 @@ static void shellSourceFile(TAOS *con, char *fptr) { if (code != 0) { fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo); - /* free local resouce: allocated memory/metric-meta refcnt */ - taos_free_result(pSql); } + + /* free local resouce: allocated memory/metric-meta refcnt */ + taos_free_result(pSql); memset(cmd, 0, MAX_COMMAND_SIZE); cmd_len = 0; diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9a5aedcdb7..305302b71a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -520,9 +520,8 @@ int main(int argc, char *argv[]) { snprintf(command, BUFFER_SIZE, 
"create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols); queryDB(taos, command); printf("meters created!\n"); - - taos_close(taos); } + taos_close(taos); /* Wait for table to create */ multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass); @@ -792,9 +791,6 @@ void * createTable(void *sarg) snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols); queryDB(winfo->taos, command); } - - taos_close(winfo->taos); - } else { /* Create all the tables; */ printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); @@ -812,7 +808,6 @@ void * createTable(void *sarg) } queryDB(winfo->taos, command); } - taos_close(winfo->taos); } return NULL; @@ -857,7 +852,6 @@ void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntable for (int i = 0; i < threads; i++) { info *t_info = infos + i; - taos_close(t_info->taos); sem_destroy(&(t_info->mutex_sem)); sem_destroy(&(t_info->lock_sem)); } @@ -875,6 +869,11 @@ void *readTable(void *sarg) { int64_t sTime = rinfo->start_time; char *tb_prefix = rinfo->tb_prefix; FILE *fp = fopen(rinfo->fp, "a"); + if (NULL == fp) { + printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); + return NULL; + } + int num_of_DPT = rinfo->nrecords_per_table; int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; int totalData = num_of_DPT * num_of_tables; @@ -930,6 +929,11 @@ void *readMetric(void *sarg) { TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; FILE *fp = fopen(rinfo->fp, "a"); + if (NULL == fp) { + printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); + return NULL; + } + int num_of_DPT = rinfo->nrecords_per_table; int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; int totalData = num_of_DPT * num_of_tables; diff --git a/src/kit/taosdump/taosdump.c 
b/src/kit/taosdump/taosdump.c index 0b1890c5ab..70f18b32bd 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -27,19 +27,18 @@ #include #include #include +#include #include "taos.h" -#include "taosmsg.h" -#include "tsclient.h" #include "taosdef.h" +#include "taosmsg.h" +#include "tglobal.h" +#include "tsclient.h" +#include "tsdb.h" #include "tutil.h" -#include "tglobal.h" - #define COMMAND_SIZE 65536 -#define DEFAULT_DUMP_FILE "taosdump.sql" - -#define MAX_DBS 100 +//#define DEFAULT_DUMP_FILE "taosdump.sql" int converStringToReadable(char *str, int size, char *buf, int bufsize); int convertNCharToReadable(char *str, int size, char *buf, int bufsize); @@ -90,21 +89,21 @@ enum _describe_table_index { }; typedef struct { - char field[TSDB_COL_NAME_LEN]; + char field[TSDB_COL_NAME_LEN + 1]; char type[16]; int length; char note[128]; } SColDes; typedef struct { - char name[TSDB_COL_NAME_LEN]; + char name[TSDB_COL_NAME_LEN + 1]; SColDes cols[]; } STableDef; extern char version[]; typedef struct { - char name[TSDB_DB_NAME_LEN]; + char name[TSDB_DB_NAME_LEN + 1]; int32_t replica; int32_t days; int32_t keep; @@ -119,8 +118,8 @@ typedef struct { } SDbInfo; typedef struct { - char name[TSDB_TABLE_NAME_LEN]; - char metric[TSDB_TABLE_NAME_LEN]; + char name[TSDB_TABLE_NAME_LEN + 1]; + char metric[TSDB_TABLE_NAME_LEN + 1]; } STableRecord; typedef struct { @@ -128,6 +127,16 @@ typedef struct { STableRecord tableRecord; } STableRecordInfo; +typedef struct { + pthread_t threadID; + int32_t threadIndex; + int32_t totalThreads; + char dbName[TSDB_TABLE_NAME_LEN + 1]; + void *taosCon; +} SThreadParaObj; + +static int64_t totalDumpOutRows = 0; + SDbInfo **dbInfos = NULL; const char *argp_program_version = version; @@ -142,7 +151,7 @@ static char doc[] = ""; /* to force a line-break, e.g.\n<-- here."; */ /* A description of the arguments we accept. 
*/ -static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i input_file"; +static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i inpath\n-o outpath"; /* Keys for options without short-options. */ #define OPT_ABORT 1 /* –abort */ @@ -150,60 +159,68 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat /* The options we understand. */ static struct argp_option options[] = { // connection option - {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, - {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, - {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"cversion", 'v', "CVERION", 0, "client version", 0}, + {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, // input/output file - {"output", 'o', "OUTPUT", 0, "Output file name.", 1}, - {"input", 'i', "INPUT", 0, "Input file name.", 1}, - {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, - {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, + {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, + {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. 
Default is /etc/taos/taos.cfg.", 1}, + {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, // dump unit options - {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'B', 0, 0, "Dump assigned databases", 2}, // dump format options - {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, - {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, - {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, - {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, - {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, - {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, + {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, + {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"table-batch", 'T', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, + {"thread_num", 't', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, {0}}; /* Used by main to communicate with parse_opt. 
*/ -typedef struct SDumpArguments { +struct arguments { // connection option - char *host; - char *user; - char *password; - uint16_t port; + char *host; + char *user; + char *password; + uint16_t port; + char cversion[TSDB_FILENAME_LEN+1]; + uint16_t mysqlFlag; // output file - char output[TSDB_FILENAME_LEN]; - char input[TSDB_FILENAME_LEN]; - char *encode; + char outpath[TSDB_FILENAME_LEN+1]; + char inpath[TSDB_FILENAME_LEN+1]; + char *encode; // dump unit option - bool all_databases; - bool databases; + bool all_databases; + bool databases; // dump format option - bool schemaonly; - bool with_property; - int64_t start_time; - int64_t end_time; - int data_batch; - bool allow_sys; + bool schemaonly; + bool with_property; + int64_t start_time; + int64_t end_time; + int32_t data_batch; + int32_t table_batch; // num of table which will be dump into one output file. + bool allow_sys; // other options - int abort; - char **arg_list; - int arg_list_len; - bool isDumpIn; -} SDumpArguments; + int32_t thread_num; + int abort; + char **arg_list; + int arg_list_len; + bool isDumpIn; +}; /* Parse a single option. */ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Get the input argument from argp_parse, which we know is a pointer to our arguments structure. 
*/ - SDumpArguments *arguments = state->input; + struct arguments *arguments = state->input; wordexp_t full_path; switch (key) { @@ -223,13 +240,24 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'P': arguments->port = atoi(arg); break; - // output file + case 'q': + arguments->mysqlFlag = atoi(arg); + break; + case 'v': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid client vesion %s\n", arg); + return -1; + } + strcpy(arguments->cversion, full_path.we_wordv[0]); + wordfree(&full_path); + break; + // output file path case 'o': if (wordexp(arg, &full_path, 0) != 0) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - tstrncpy(arguments->output, full_path.we_wordv[0], TSDB_FILENAME_LEN); + strcpy(arguments->outpath, full_path.we_wordv[0]); wordfree(&full_path); break; case 'i': @@ -238,7 +266,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - tstrncpy(arguments->input, full_path.we_wordv[0], TSDB_FILENAME_LEN); + strcpy(arguments->inpath, full_path.we_wordv[0]); wordfree(&full_path); break; case 'c': @@ -246,7 +274,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); + strcpy(configDir, full_path.we_wordv[0]); wordfree(&full_path); break; case 'e': @@ -276,13 +304,19 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'N': arguments->data_batch = atoi(arg); break; + case 'T': + arguments->table_batch = atoi(arg); + break; + case 't': + arguments->thread_num = atoi(arg); + break; case OPT_ABORT: arguments->abort = 1; break; case ARGP_KEY_ARG: - arguments->arg_list = &state->argv[state->next - 1]; + arguments->arg_list = &state->argv[state->next - 1]; arguments->arg_list_len = state->argc - state->next + 1; - state->next = state->argc; + state->next = 
state->argc; break; default: @@ -294,52 +328,70 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; -TAOS *taos = NULL; -char *command = NULL; -char *lcommand = NULL; -char *buffer = NULL; - -int taosDumpOut(SDumpArguments *arguments); - -int taosDumpIn(SDumpArguments *arguments); - +int taosDumpOut(struct arguments *arguments); +int taosDumpIn(struct arguments *arguments); void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); - -int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp); - -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArguments *arguments, FILE *fp); - -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, SDumpArguments *arguments, - FILE *fp); - -int32_t taosDumpTable(char *table, char *metric, SDumpArguments *arguments, FILE *fp); - -int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp); - -int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments); - -int taosCheckParam(SDumpArguments *arguments); - +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp); +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp); +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon); +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon); +int taosCheckParam(struct arguments *arguments); void taosFreeDbInfos(); +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName); + +struct arguments tsArguments = { + // connection option + NULL, + "root", + "taosdata", + 0, + "", + 0, + // outpath and inpath + "", 
+ "", + NULL, + // dump unit option + false, + false, + // dump format option + false, + false, + 0, + INT64_MAX, + 1, + 1, + false, + // other options + 5, + 0, + NULL, + 0, + false +}; + +int queryDB(TAOS *taos, char *command) { + TAOS_RES *pSql = NULL; + int32_t code = -1; + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (code) { + fprintf(stderr, "sql error: %s, reason:%s\n", command, taos_errstr(pSql)); + } + taos_free_result(pSql); + return code; +} int main(int argc, char *argv[]) { - SDumpArguments arguments = { - // connection option - NULL, TSDB_DEFAULT_USER, TSDB_DEFAULT_PASS, 0, - // output file - DEFAULT_DUMP_FILE, DEFAULT_DUMP_FILE, NULL, - // dump unit option - false, false, - // dump format option - false, false, 0, INT64_MAX, 1, false, - // other options - 0, NULL, 0, false}; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. */ - argp_parse(&argp, argc, argv, 0, 0, &arguments); + argp_parse(&argp, argc, argv, 0, 0, &tsArguments); - if (arguments.abort) { + if (tsArguments.abort) { #ifndef _ALPINE error(10, 0, "ABORTED"); #else @@ -347,14 +399,48 @@ int main(int argc, char *argv[]) { #endif } - if (taosCheckParam(&arguments) < 0) { + printf("====== arguments config ======\n"); + { + printf("host: %s\n", tsArguments.host); + printf("user: %s\n", tsArguments.user); + printf("password: %s\n", tsArguments.password); + printf("port: %u\n", tsArguments.port); + printf("cversion: %s\n", tsArguments.cversion); + printf("mysqlFlag: %d", tsArguments.mysqlFlag); + printf("outpath: %s\n", tsArguments.outpath); + printf("inpath: %s\n", tsArguments.inpath); + printf("encode: %s\n", tsArguments.encode); + printf("all_databases: %d\n", tsArguments.all_databases); + printf("databases: %d\n", tsArguments.databases); + printf("schemaonly: %d\n", tsArguments.schemaonly); + printf("with_property: %d\n", tsArguments.with_property); + printf("start_time: %" PRId64 "\n", tsArguments.start_time); + printf("end_time: 
%" PRId64 "\n", tsArguments.end_time); + printf("data_batch: %d\n", tsArguments.data_batch); + printf("table_batch: %d\n", tsArguments.table_batch); + printf("allow_sys: %d\n", tsArguments.allow_sys); + printf("abort: %d\n", tsArguments.abort); + printf("isDumpIn: %d\n", tsArguments.isDumpIn); + printf("arg_list_len: %d\n", tsArguments.arg_list_len); + + for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { + printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + } + } + printf("==============================\n"); + + if (tsArguments.cversion[0] != 0){ + strcpy(version, tsArguments.cversion); + } + + if (taosCheckParam(&tsArguments) < 0) { exit(EXIT_FAILURE); } - if (arguments.isDumpIn) { - if (taosDumpIn(&arguments) < 0) return -1; + if (tsArguments.isDumpIn) { + if (taosDumpIn(&tsArguments) < 0) return -1; } else { - if (taosDumpOut(&arguments) < 0) return -1; + if (taosDumpOut(&tsArguments) < 0) return -1; } return 0; @@ -362,96 +448,214 @@ int main(int argc, char *argv[]) { void taosFreeDbInfos() { if (dbInfos == NULL) return; - for (int i = 0; i < MAX_DBS; i++) tfree(dbInfos[i]); + for (int i = 0; i < 128; i++) tfree(dbInfos[i]); tfree(dbInfos); } -int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo) { +// check table is normal table or super table +int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) { TAOS_ROW row = NULL; bool isSet = false; + TAOS_RES *result = NULL; memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); - sprintf(command, "show tables like %s", table); - TAOS_RES *result = taos_query(taos, command);\ - int32_t code = taos_errno(result); + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + sprintf(tempCommand, "show tables like %s", table); + + result = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(result); + if (code != 0) { - fprintf(stderr, "failed to run 
command %s, error: %s\n", command, taos_errstr(result)); + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); taos_free_result(result); return -1; } TAOS_FIELD *fields = taos_fetch_fields(result); - if ((row = taos_fetch_row(result)) != NULL) { + while ((row = taos_fetch_row(result)) != NULL) { isSet = true; pTableRecordInfo->isMetric = false; strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + break; } taos_free_result(result); result = NULL; - if (isSet) return 0; - - sprintf(command, "show stables like %s", table); - - result = taos_query(taos, command); + if (isSet) { + free(tempCommand); + return 0; + } + + sprintf(tempCommand, "show stables like %s", table); + + result = taos_query(taosCon, tempCommand); code = taos_errno(result); + if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); taos_free_result(result); return -1; } - if ((row = taos_fetch_row(result)) != NULL) { + while ((row = taos_fetch_row(result)) != NULL) { isSet = true; pTableRecordInfo->isMetric = true; - tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN); + strcpy(pTableRecordInfo->tableRecord.metric, table); + break; } taos_free_result(result); result = NULL; - if (isSet) return 0; - + if (isSet) { + free(tempCommand); + return 0; + } fprintf(stderr, "invalid table/metric %s\n", table); + free(tempCommand); return -1; } -int taosDumpOut(SDumpArguments *arguments) { - TAOS_ROW row; - TAOS_RES* result = NULL; - char *temp = NULL; - FILE *fp = NULL; - int count = 0; - STableRecordInfo tableRecordInfo; - fp = fopen(arguments->output, "w"); - if (fp == NULL) { - fprintf(stderr, "failed to open file 
%s\n", arguments->output); +int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) { + STableRecord tableRecord; + + if (-1 == *fd) { + *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (*fd == -1) { + fprintf(stderr, "failed to open temp file: .tables.tmp.0\n"); + return -1; + } + } + + memset(tableRecord.name, 0, sizeof(STableRecord)); + strcpy(tableRecord.name, meter); + strcpy(tableRecord.metric, metric); + + twrite(*fd, &tableRecord, sizeof(STableRecord)); + return 0; +} + + +int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); return -1; } - dbInfos = (SDbInfo **)calloc(MAX_DBS, sizeof(SDbInfo *)); + sprintf(tmpCommand, "select tbname from %s", metric); + + TAOS_RES *result = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(result); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t numOfTable = 0; + int32_t numOfThread = *totalNumOfThread; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(result)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(result); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + free(tmpCommand); + return -1; + } + + numOfThread++; + } + + 
memset(tableRecord.name, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[0], fields[0].bytes); + strcpy(tableRecord.metric, metric); + + twrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + tclose(fd); + fd = -1; + } + } + tclose(fd); + fd = -1; + taos_free_result(result); + + *totalNumOfThread = numOfThread; + + free(tmpCommand); + + return 0; +} + +int taosDumpOut(struct arguments *arguments) { + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; + + TAOS_ROW row; + FILE *fp = NULL; + int32_t count = 0; + STableRecordInfo tableRecordInfo; + + char tmpBuf[TSDB_FILENAME_LEN+9] = {0}; + if (arguments->outpath[0] != 0) { + sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath); + } else { + sprintf(tmpBuf, "dbs.sql"); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpBuf); + return -1; + } + + dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); if (dbInfos == NULL) { fprintf(stderr, "failed to allocate memory\n"); goto _exit_failure; } - temp = (char *)malloc(2 * COMMAND_SIZE); - if (temp == NULL) { + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { fprintf(stderr, "failed to allocate memory\n"); goto _exit_failure; } - command = temp; - buffer = command + COMMAND_SIZE; - /* Connect to server */ taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); if (taos == NULL) { @@ -465,29 +669,30 @@ int taosDumpOut(SDumpArguments *arguments) { taosDumpCharset(fp); sprintf(command, "show databases"); - result = taos_query(taos, command); + result = taos_query(taos, command); int32_t code = taos_errno(result); + if (code != 0) { - fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result)); - taos_free_result(result); + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); goto _exit_failure; 
} TAOS_FIELD *fields = taos_fetch_fields(result); while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'monitor', but subsequent version changed to 'log' if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "monitor", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && (!arguments->allow_sys)) continue; - if (arguments->databases) { + if (arguments->databases) { // input multi dbs for (int i = 0; arguments->arg_list[i]; i++) { if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) goto _dump_db_point; } continue; - } else if (!arguments->all_databases) { + } else if (!arguments->all_databases) { // only input one db if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) goto _dump_db_point; @@ -504,19 +709,19 @@ int taosDumpOut(SDumpArguments *arguments) { } strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - if (strcmp(arguments->user, TSDB_DEFAULT_USER) == 0) { - dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); - dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); - dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); - dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); - dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); - dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); - dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); - dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); - dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); - dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - } + #if 0 + dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); + dbInfos[count]->days = 
(int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); + dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); + dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); + dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); + dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); + dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); + dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); + dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); + dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); +#endif count++; @@ -528,42 +733,71 @@ int taosDumpOut(SDumpArguments *arguments) { } } - // taos_free_result(result); - if (count == 0) { fprintf(stderr, "No databases valid to dump\n"); goto _exit_failure; } - if (arguments->databases || arguments->all_databases) { + if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases for (int i = 0; i < count; i++) { - (void)taosDumpDb(dbInfos[i], arguments, fp); + taosDumpDb(dbInfos[i], arguments, fp, taos); } } else { - if (arguments->arg_list_len == 1) { - (void)taosDumpDb(dbInfos[0], arguments, fp); - } else { + if (arguments->arg_list_len == 1) { // case: taosdump + taosDumpDb(dbInfos[0], arguments, fp, taos); + } else { // case: taosdump tablex tabley ... 
taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); sprintf(command, "use %s", dbInfos[0]->name); - if (taos_query(taos, command) == NULL ) { + + result = taos_query(taos, command); + int32_t code = taos_errno(result); + if (code != 0) { fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); goto _exit_failure; } fprintf(fp, "USE %s;\n\n", dbInfos[0]->name); + int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int normalTblFd = -1; + int32_t retCode; for (int i = 1; arguments->arg_list[i]; i++) { - if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo) < 0) { - fprintf(stderr, "invalide table %s\n", arguments->arg_list[i]); + if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { + fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]); continue; } - if (tableRecordInfo.isMetric) { // dump whole metric - (void)taosDumpMetric(tableRecordInfo.tableRecord.metric, arguments, fp); - } else { // dump MTable and NTable - (void)taosDumpTable(tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, arguments, fp); + if (tableRecordInfo.isMetric) { // dump all table of this metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); + } else { + if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + } + retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); } + + if (retCode < 0) { + if (-1 != normalTblFd){ + tclose(normalTblFd); + } + goto _clean_tmp_file; + } + } + + if (-1 != normalTblFd){ + tclose(normalTblFd); + } + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name); + + char 
tmpFileName[TSDB_FILENAME_LEN + 1]; + _clean_tmp_file: + for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); } } } @@ -572,21 +806,120 @@ int taosDumpOut(SDumpArguments *arguments) { fclose(fp); taos_close(taos); taos_free_result(result); - tfree(temp); - taosFreeDbInfos(); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); return 0; _exit_failure: fclose(fp); taos_close(taos); taos_free_result(result); - tfree(temp); + tfree(command); taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); return -1; } +int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon) { + TAOS_ROW row = NULL; + TAOS_RES *tmpResult = NULL; + int count = 0; + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tempCommand, "describe %s", table); + + tmpResult = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + taos_free_result(tmpResult); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + strcpy(tableDes->name, table); + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + + count++; + } + + taos_free_result(tmpResult); + tmpResult = NULL; + + free(tempCommand); + + 
return count; +} + +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + + if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table + /* + count = taosGetTableDes(metric, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateTableClause(tableDes, count, fp); + + memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + */ + + count = taosGetTableDes(table, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateMTableClause(tableDes, metric, count, fp); + + } else { // dump table definition + count = taosGetTableDes(table, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateTableClause(tableDes, count, fp); + } + + free(tableDes); + + return taosDumpTableData(fp, table, arguments, taosCon); +} + void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { - char *pstr = buffer; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = tmpCommand; pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); if (isDumpProperty) { @@ -596,78 +929,304 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { dbInfo->ablocks, dbInfo->tblocks, dbInfo->ctime, dbInfo->clog, dbInfo->comp); } - fprintf(fp, "%s\n\n", buffer); + pstr += sprintf(pstr, ";"); + + fprintf(fp, "%s\n\n", tmpCommand); + free(tmpCommand); } -int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) { +void* taosDumpOutWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + STableRecord tableRecord; + int fd; + + char tmpFileName[TSDB_FILENAME_LEN*4] = {0}; + 
sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName); + return NULL; + } + + FILE *fp = NULL; + memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128); + + if (tsArguments.outpath[0] != 0) { + sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + } else { + sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); + } + + fp = fopen(tmpFileName, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpFileName); + return NULL; + } + + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, "use %s", pThread->dbName); + + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", pThread->dbName); + taos_free_result(tmpResult); + return NULL; + } + + fprintf(fp, "USE %s\n\n", pThread->dbName); + while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) { + taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon); + } + + taos_free_result(tmpResult); + tclose(fd); + fclose(fp); + + return NULL; +} + +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName) +{ + pthread_attr_t thattr; + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); + for (int t = 0; t < numOfThread; ++t) { + SThreadParaObj *pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = numOfThread; + strcpy(pThread->dbName, dbName); + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, 
taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int32_t t = 0; t < numOfThread; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int32_t t = 0; t < numOfThread; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + + +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + if (NULL == tableDes) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + count = taosGetTableDes(table, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + fprintf(stderr, "failed to get stable schema\n"); + exit(-1); + } + + taosDumpCreateTableClause(tableDes, count, fp); + + free(tableDes); + return 0; +} + + +int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) +{ + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + sprintf(tmpCommand, "use %s", dbName); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s, error: %s\n", dbName, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + taos_free_result(tmpResult); + + sprintf(tmpCommand, "show stables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, error: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + 
taos_free_result(tmpResult); + exit(-1); + } + taos_free_result(tmpResult); + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".stables.tmp"); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + free(tmpCommand); + remove(".stables.tmp"); + exit(-1); + } + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + twrite(fd, &tableRecord, sizeof(STableRecord)); + } + + taos_free_result(tmpResult); + lseek(fd, 0, SEEK_SET); + + while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) { + (void)taosDumpStable(tableRecord.name, fp, taosCon); + } + + tclose(fd); + remove(".stables.tmp"); + + free(tmpCommand); + return 0; +} + + +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) { TAOS_ROW row; int fd = -1; STableRecord tableRecord; taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); - sprintf(command, "use %s", dbInfo->name); - if (taos_errno(taos_query(taos, command)) != 0) { - fprintf(stderr, "invalid database %s\n", dbInfo->name); + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); return -1; } + sprintf(tmpCommand, "use %s", dbInfo->name); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", dbInfo->name); + free(tmpCommand); + taos_free_result(tmpResult); + return -1; + } + taos_free_result(tmpResult); + fprintf(fp, "USE %s\n\n", dbInfo->name); + + (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); - 
sprintf(command, "show tables"); - TAOS_RES* result = taos_query(taos,command); - int32_t code = taos_errno(result); + sprintf(tmpCommand, "show tables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - taos_free_result(result); + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(tmpResult); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); - fd = open(".table.tmp", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file\n"); - taos_free_result(result); - return -1; - } + int32_t numOfTable = 0; + int32_t numOfThread = 0; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + free(tmpCommand); + return -1; + } - while ((row = taos_fetch_row(result)) != NULL) { + numOfThread++; + } + memset(&tableRecord, 0, sizeof(STableRecord)); strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); strncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); twrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + tclose(fd); + fd = -1; + } } + tclose(fd); + fd = -1; + 
taos_free_result(tmpResult); - taos_free_result(result); - - (void)lseek(fd, 0, SEEK_SET); - - STableRecord tableInfo; - while (1) { - memset(&tableInfo, 0, sizeof(STableRecord)); - ssize_t ret = read(fd, &tableInfo, sizeof(STableRecord)); - if (ret <= 0) break; - - tableInfo.name[sizeof(tableInfo.name) - 1] = 0; - tableInfo.metric[sizeof(tableInfo.metric) - 1] = 0; - taosDumpTable(tableInfo.name, tableInfo.metric, arguments, fp); + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name); + for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); } - - close(fd); - (void)remove(".table.tmp"); + + free(tmpCommand); return 0; } -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArguments *arguments, FILE *fp) { - char *pstr = NULL; - pstr = buffer; +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { int counter = 0; int count_temp = 0; - pstr += sprintf(buffer, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char* pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -699,19 +1258,27 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArgument } } - pstr += sprintf(pstr, ")"); + pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n\n", buffer); + fprintf(fp, "%s\n", tmpBuf); + + free(tmpBuf); } -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, SDumpArguments *arguments, - FILE *fp) { - char *pstr = NULL; - pstr = buffer; +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp) { int counter = 0; int count_temp = 0; - pstr += sprintf(buffer, 
"CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = NULL; + pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -721,54 +1288,6 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols count_temp = counter; for (; counter < numOfCols; counter++) { - TAOS_ROW row = NULL; - - sprintf(command, "select %s from %s limit 1", tableDes->cols[counter].field, tableDes->name); - - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - return; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - row = taos_fetch_row(result); - switch (fields[0].type) { - case TSDB_DATA_TYPE_BOOL: - sprintf(tableDes->cols[counter].note, "%d", ((((int)(*((char *)row[0]))) == 1) ? 
1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - sprintf(tableDes->cols[counter].note, "%d", (int)(*((char *)row[0]))); - break; - case TSDB_DATA_TYPE_SMALLINT: - sprintf(tableDes->cols[counter].note, "%d", (int)(*((short *)row[0]))); - break; - case TSDB_DATA_TYPE_INT: - sprintf(tableDes->cols[counter].note, "%d", *((int *)row[0])); - break; - case TSDB_DATA_TYPE_BIGINT: - sprintf(tableDes->cols[counter].note, "%" PRId64 "", *((int64_t *)row[0])); - break; - case TSDB_DATA_TYPE_FLOAT: - sprintf(tableDes->cols[counter].note, "%f", GET_FLOAT_VAL(row[0])); - break; - case TSDB_DATA_TYPE_DOUBLE: - sprintf(tableDes->cols[counter].note, "%f", GET_DOUBLE_VAL(row[0])); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - sprintf(tableDes->cols[counter].note, "%" PRId64 "", *(int64_t *)row[0]); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - default: - strncpy(tableDes->cols[counter].note, (char *)row[0], fields[0].bytes); - break; - } - - taos_free_result(result); - if (counter != count_temp) { if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { @@ -792,193 +1311,95 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols /* } */ } - pstr += sprintf(pstr, ")"); + pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n\n", buffer); + fprintf(fp, "%s\n", tmpBuf); + free(tmpBuf); } -int taosGetTableDes(char *table, STableDef *tableDes) { - TAOS_ROW row = NULL; - int count = 0; - - sprintf(command, "describe %s", table); - - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - taos_free_result(result); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); - - while ((row = taos_fetch_row(result)) != NULL) { - strncpy(tableDes->cols[count].field, (char 
*)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - - count++; - } - - taos_free_result(result); - result = NULL; - - return count; -} - -int32_t taosDumpTable(char *table, char *metric, SDumpArguments *arguments, FILE *fp) { - int count = 0; - - STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); - - if (metric != NULL && metric[0] != '\0') { // dump metric definition - count = taosGetTableDes(metric, tableDes); - - if (count < 0) { - free(tableDes); - return -1; - } - - taosDumpCreateTableClause(tableDes, count, arguments, fp); - - memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); - - count = taosGetTableDes(table, tableDes); - - if (count < 0) { - free(tableDes); - return -1; - } - - taosDumpCreateMTableClause(tableDes, metric, count, arguments, fp); - - } else { // dump table definition - count = taosGetTableDes(table, tableDes); - - if (count < 0) { - free(tableDes); - return -1; - } - - taosDumpCreateTableClause(tableDes, count, arguments, fp); - } - - free(tableDes); - - return taosDumpTableData(fp, table, arguments); -} - -int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp) { - TAOS_ROW row = NULL; - int fd = -1; - STableRecord tableRecord; - - //tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - - sprintf(command, "select tbname from %s", metric); - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - 
taos_free_result(result); - return -1; - } - - fd = open(".table.tmp", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd < 0) { - fprintf(stderr, "failed to open temp file"); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - while ((row = taos_fetch_row(result)) != NULL) { - memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); - tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - twrite(fd, &tableRecord, sizeof(STableRecord)); - } - - taos_free_result(result); - result = NULL; - - (void)lseek(fd, 0, SEEK_SET); - - //STableRecord tableInfo; - char tableName[TSDB_TABLE_NAME_LEN] ; - char metricName[TSDB_TABLE_NAME_LEN]; - ssize_t ret; - while (1) { - //memset(&tableInfo, 0, sizeof(STableRecord)); - memset(tableName, 0, TSDB_TABLE_NAME_LEN); - memset(metricName, 0, TSDB_TABLE_NAME_LEN); - //ssize_t ret = read(fd, &tableInfo, sizeof(STableRecord)); - //if (ret <= 0) break; - ret = read(fd, tableName, TSDB_TABLE_NAME_LEN); - if (ret <= 0) break; - - ret = read(fd, metricName, TSDB_TABLE_NAME_LEN); - if (ret <= 0) break; - - //tableInfo.name[sizeof(tableInfo.name) - 1] = 0; - //tableInfo.metric[sizeof(tableInfo.metric) - 1] = 0; - //taosDumpTable(tableInfo.name, tableInfo.metric, arguments, fp); - //tstrncpy(tableName, tableInfo.name, TSDB_TABLE_NAME_LEN-1); - //tstrncpy(metricName, tableInfo.metric, TSDB_TABLE_NAME_LEN-1); - taosDumpTable(tableName, metricName, arguments, fp); - } - - close(fd); - (void)remove(".table.tmp"); - - return 0; -} - -int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon) { /* char temp[MAX_COMMAND_SIZE] = "\0"; */ + int64_t totalRows = 0; int count = 0; char *pstr = NULL; TAOS_ROW row = NULL; int numFields = 0; char *tbuf = NULL; - if (arguments->schemaonly) return 0; - - sprintf(command, "select * from %s where _c0 >= %" PRId64 
" and _c0 <= %" PRId64 " order by _c0 asc", tbname, arguments->start_time, - arguments->end_time); - - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(result)); - taos_free_result(result); + char* tmpCommand = (char *)calloc(1, COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); return -1; } - numFields = taos_field_count(result); + char* tmpBuffer = (char *)calloc(1, COMMAND_SIZE); + if (tmpBuffer == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + free(tmpCommand); + return -1; + } + + pstr = tmpBuffer; + + if (arguments->schemaonly) { + free(tmpCommand); + free(tmpBuffer); + return 0; + } + + sprintf(tmpCommand, + "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", + tbname, + arguments->start_time, + arguments->end_time); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, reason: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); + return -1; + } + + numFields = taos_field_count(taosCon); assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); tbuf = (char *)malloc(COMMAND_SIZE); if (tbuf == NULL) { fprintf(stderr, "No enough memory\n"); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); return -1; } + char sqlStr[8] = "\0"; + if (arguments->mysqlFlag) { + sprintf(sqlStr, "INSERT"); + } else { + sprintf(sqlStr, "IMPORT"); + } + + int rowFlag = 0; count = 0; - while ((row = taos_fetch_row(result)) != NULL) { - pstr = buffer; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + pstr = tmpBuffer; if (count == 0) { - pstr += sprintf(pstr, "INSERT INTO %s VALUES (", tbname); - } else { - 
pstr += sprintf(pstr, "("); + pstr += sprintf(pstr, "%s INTO %s VALUES (", sqlStr, tbname); + } else { + if (arguments->mysqlFlag) { + if (0 == rowFlag) { + pstr += sprintf(pstr, "("); + rowFlag++; + } else { + pstr += sprintf(pstr, ", ("); + } + } else { + pstr += sprintf(pstr, "("); + } } for (int col = 0; col < numFields; col++) { @@ -1003,7 +1424,7 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { pstr += sprintf(pstr, "%d", *((int *)row[col])); break; case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, "%" PRId64, *((int64_t *)row[col])); + pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col])); break; case TSDB_DATA_TYPE_FLOAT: pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); @@ -1022,34 +1443,52 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { pstr += sprintf(pstr, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, "%" PRId64, *(int64_t *)row[col]); + if (!arguments->mysqlFlag) { + pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + pstr += sprintf(pstr, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } break; default: break; } } - sprintf(pstr, ")"); + pstr += sprintf(pstr, ") "); + + totalRows++; count++; - fprintf(fp, "%s", buffer); + fprintf(fp, "%s", tmpBuffer); if (count >= arguments->data_batch) { - fprintf(fp, "\n"); + fprintf(fp, ";\n"); count = 0; - } else { - fprintf(fp, "\\\n"); - } + } //else { + //fprintf(fp, "\\\n"); + //} } + atomic_add_fetch_64(&totalDumpOutRows, totalRows); + fprintf(fp, "\n"); - if (tbuf) free(tbuf); - taos_free_result(result); - result = NULL; + if (tbuf) { + free(tbuf); + } + + taos_free_result(tmpResult); + tmpResult = NULL; + free(tmpCommand); + free(tmpBuffer); return 0; } -int taosCheckParam(SDumpArguments *arguments) { +int 
taosCheckParam(struct arguments *arguments) { if (arguments->all_databases && arguments->databases) { fprintf(stderr, "conflict option --all-databases and --databases\n"); return -1; @@ -1059,19 +1498,25 @@ int taosCheckParam(SDumpArguments *arguments) { fprintf(stderr, "start time is larger than end time\n"); return -1; } + if (arguments->arg_list_len == 0) { if ((!arguments->all_databases) && (!arguments->isDumpIn)) { fprintf(stderr, "taosdump requires parameters\n"); return -1; } } - - if (arguments->isDumpIn && (strcmp(arguments->output, DEFAULT_DUMP_FILE) != 0)) { - fprintf(stderr, "duplicate parameter input and output file\n"); +/* + if (arguments->isDumpIn && (strcmp(arguments->outpath, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } +*/ + if (!arguments->isDumpIn && arguments->encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); return -1; } - if (!arguments->isDumpIn && arguments->encode != NULL) { + if (arguments->table_batch <= 0) { fprintf(stderr, "invalid option in dump out\n"); return -1; } @@ -1134,177 +1579,6 @@ void taosReplaceCtrlChar(char *str) { *pstr = '\0'; } -int taosDumpIn(SDumpArguments *arguments) { - assert(arguments->isDumpIn); - - int tsize = 0; - FILE * fp = NULL; - char * line = NULL; - _Bool isRun = true; - size_t line_size = 0; - char * pstr = NULL, *lstr = NULL; - iconv_t cd = (iconv_t)-1; - size_t inbytesleft = 0; - size_t outbytesleft = COMMAND_SIZE; - char fcharset[64]; - char * tcommand = NULL; - - fp = fopen(arguments->input, "r"); - if (fp == NULL) { - fprintf(stderr, "failed to open input file %s\n", arguments->input); - return -1; - } - - taosLoadFileCharset(fp, fcharset); - - taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); - if (taos == NULL) { - fprintf(stderr, "failed to connect to TDengine server\n"); - goto _dumpin_exit_failure; - } - - command = (char *)malloc(COMMAND_SIZE); - lcommand 
= (char *)malloc(COMMAND_SIZE); - if (command == NULL || lcommand == NULL) { - fprintf(stderr, "failed to connect to allocate memory\n"); - goto _dumpin_exit_failure; - } - - // Resolve locale - if (*fcharset != '\0') { - arguments->encode = fcharset; - } - - if (arguments->encode != NULL && strcasecmp(tsCharset, arguments->encode) != 0) { - cd = iconv_open(tsCharset, arguments->encode); - if (cd == (iconv_t)-1) { - fprintf(stderr, "Failed to open iconv handle\n"); - goto _dumpin_exit_failure; - } - } - - pstr = command; - int64_t linenu = 0; - while (1) { - ssize_t size = getline(&line, &line_size, fp); - linenu++; - if (size <= 0) break; - if (size == 1) { - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != (iconv_t)-1) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(tcommand); - - TAOS_RES* result = taos_query(taos, tcommand); - if (taos_errno(result) != 0){ - fprintf(stderr, "linenu: %" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, - taos_errstr(result)); - taos_free_result(result); - } - - pstr = command; - pstr[0] = '\0'; - tsize = 0; - isRun = true; - } - - continue; - } - - /* if (line[0] == '-' && line[1] == '-') continue; */ - - line[size - 1] = 0; - - if (tsize + size - 1 > COMMAND_SIZE) { - fprintf(stderr, "command is too long\n"); - goto _dumpin_exit_failure; - } - - if (line[size - 2] == '\\') { - line[size - 2] = ' '; - isRun = false; - } else { - isRun = true; - } - - memcpy(pstr, line, size - 1); - pstr += (size - 1); - *pstr = '\0'; - - if (!isRun) continue; - - if (command != pstr && !isEmptyCommand(command)) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != (iconv_t)-1) { - iconv(cd, &pstr, &inbytesleft, 
&lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(tcommand); - TAOS_RES* result = taos_query(taos, tcommand); - int32_t code = taos_errno(result); - if (code != 0) - { - fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason: %s \ncontinue...\n", linenu, command, - taos_errstr(result)); - } - taos_free_result(result); - } - - pstr = command; - *pstr = '\0'; - tsize = 0; - } - - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != (iconv_t)-1) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(lcommand); - if (taos_query(taos, tcommand) == NULL) - fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, - taos_errstr(taos)); - } - - if (cd != (iconv_t)-1) iconv_close(cd); - tfree(line); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return 0; - -_dumpin_exit_failure: - if (cd != (iconv_t)-1) iconv_close(cd); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return -1; -} - char *ascii_literal_list[] = { "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", @@ -1351,7 +1625,7 @@ int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { if ((int)wc < 256) { pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); - } else if (byte_width > 0) { + } else { memcpy(pbuf, pstr, byte_width); pbuf += byte_width; } @@ -1399,3 +1673,453 @@ _exit_no_charset: tfree(line); return; } + +// ======== dumpIn support multi threads functions ================================// + +static char **tsDumpInSqlFiles = NULL; +static int32_t 
tsSqlFileNum = 0; +static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0}; +static char tsfCharset[64] = {0}; +static int taosGetFilesNum(const char *directoryName, const char *prefix) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + if (fscanf(fp, "%d", &fileNum) != 1) { + fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd); + exit(0); + } + + if (fileNum <= 0) { + fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName); + exit(0); + } + + pclose(fp); + return fileNum; +} + +static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + while (fscanf(fp, "%s", fileArray[fileNum++])) { + if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) { + fileNum--; + } + if (fileNum >= totalFiles) { + break; + } + } + + if (fileNum != totalFiles) { + fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName); + exit(0); + } + + pclose(fp); +} + +static void taosCheckTablesSQLFile(const char *directoryName) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/dbs.sql", directoryName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + while (fscanf(fp, "%s", tsDbSqlFile)) { + break; + } + + pclose(fp); +} + +static void taosMallocSQLFiles() +{ + tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*)); + for (int i = 0; i < tsSqlFileNum; i++) { + tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN); + } +} + +static 
void taosFreeSQLFiles() +{ + for (int i = 0; i < tsSqlFileNum; i++) { + tfree(tsDumpInSqlFiles[i]); + } + tfree(tsDumpInSqlFiles); +} + +static void taosGetDirectoryFileList(char *inputDir) +{ + struct stat fileStat; + if (stat(inputDir, &fileStat) < 0) { + fprintf(stderr, "ERROR: %s not exist\n", inputDir); + exit(0); + } + + if (fileStat.st_mode & S_IFDIR) { + taosCheckTablesSQLFile(inputDir); + tsSqlFileNum = taosGetFilesNum(inputDir, "sql"); + int totalSQLFileNum = tsSqlFileNum; + if (tsDbSqlFile[0] != 0) { + tsSqlFileNum--; + } + taosMallocSQLFiles(); + taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNum); + fprintf(stdout, "\nstart to dispose %d files in %s\n", totalSQLFileNum, inputDir); + } + else { + fprintf(stderr, "ERROR: %s is not a directory\n", inputDir); + exit(0); + } +} + +static FILE* taosOpenDumpInFile(char *fptr) { + wordexp_t full_path; + + if (wordexp(fptr, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: illegal file name: %s\n", fptr); + return NULL; + } + + char *fname = full_path.we_wordv[0]; + + if (access(fname, F_OK) != 0) { + fprintf(stderr, "ERROR: file %s is not exist\n", fptr); + + wordfree(&full_path); + return NULL; + } + + if (access(fname, R_OK) != 0) { + fprintf(stderr, "ERROR: file %s is not readable\n", fptr); + + wordfree(&full_path); + return NULL; + } + + FILE *f = fopen(fname, "r"); + if (f == NULL) { + fprintf(stderr, "ERROR: failed to open file %s\n", fname); + wordfree(&full_path); + return NULL; + } + + wordfree(&full_path); + + return f; +} + +int taosDumpInOneFile_old(TAOS * taos, FILE* fp, char* fcharset, char* encode) { + char *command = NULL; + char *lcommand = NULL; + int tsize = 0; + char *line = NULL; + _Bool isRun = true; + size_t line_size = 0; + char *pstr = NULL; + char *lstr = NULL; + size_t inbytesleft = 0; + size_t outbytesleft = COMMAND_SIZE; + char *tcommand = NULL; + char *charsetOfFile = NULL; + iconv_t cd = (iconv_t)(-1); + + command = (char *)malloc(COMMAND_SIZE); + lcommand = (char 
*)malloc(COMMAND_SIZE); + if (command == NULL || lcommand == NULL) { + fprintf(stderr, "failed to connect to allocate memory\n"); + goto _dumpin_exit_failure; + } + + // Resolve locale + if (*fcharset != '\0') { + charsetOfFile = fcharset; + } else { + charsetOfFile = encode; + } + + if (charsetOfFile != NULL && strcasecmp(tsCharset, charsetOfFile) != 0) { + cd = iconv_open(tsCharset, charsetOfFile); + if (cd == ((iconv_t)(-1))) { + fprintf(stderr, "Failed to open iconv handle\n"); + goto _dumpin_exit_failure; + } + } + + pstr = command; + int64_t linenu = 0; + while (1) { + ssize_t size = getline(&line, &line_size, fp); + linenu++; + if (size <= 0) break; + if (size == 1) { + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + + taosReplaceCtrlChar(tcommand); + + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu: %" PRId64 " failed\n", linenu); + exit(0); + } + + pstr = command; + pstr[0] = '\0'; + tsize = 0; + isRun = true; + } + + continue; + } + + /* if (line[0] == '-' && line[1] == '-') continue; */ + + line[size - 1] = 0; + + if (tsize + size - 1 > COMMAND_SIZE) { + fprintf(stderr, "command is too long\n"); + goto _dumpin_exit_failure; + } + + if (line[size - 2] == '\\') { + line[size - 2] = ' '; + isRun = false; + } else { + isRun = true; + } + + memcpy(pstr, line, size - 1); + pstr += (size - 1); + *pstr = '\0'; + + if (!isRun) continue; + + if (command != pstr && !isEmptyCommand(command)) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + 
taosReplaceCtrlChar(tcommand); + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu:%" PRId64 " failed\n", linenu); + exit(0); + } + } + + pstr = command; + *pstr = '\0'; + tsize = 0; + } + + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(lcommand); + if (queryDB(taos, tcommand) != 0) + fprintf(stderr, "error sql: linenu:%" PRId64 " failed \n", linenu); + } + + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(line); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return 0; + +_dumpin_exit_failure: + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return -1; +} + +int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + cmd = (char *)malloc(COMMAND_SIZE); + if (cmd == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + int lineNo = 0; + while ((read_len = getline(&line, &line_len, fp)) != -1) { + ++lineNo; + if (read_len >= COMMAND_SIZE) continue; + line[--read_len] = '\0'; + + //if (read_len == 0 || isCommentLine(line)) { // line starts with # + if (read_len == 0 ) { + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + if (queryDB(taos, cmd)) { + fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + } + + memset(cmd, 0, COMMAND_SIZE); + cmd_len = 0; + } + + tfree(cmd); + tfree(line); + fclose(fp); + return 0; +} + +void* 
taosDumpInWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + for (int32_t f = 0; f < tsSqlFileNum; ++f) { + if (f % pThread->totalThreads == pThread->threadIndex) { + char *SQLFileName = tsDumpInSqlFiles[f]; + FILE* fp = taosOpenDumpInFile(SQLFileName); + if (NULL == fp) { + continue; + } + fprintf(stderr, "Success Open input file: %s\n", SQLFileName); + taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName); + } + } + + return NULL; +} + +static void taosStartDumpInWorkThreads(struct arguments *args) +{ + pthread_attr_t thattr; + SThreadParaObj *pThread; + int32_t totalThreads = args->thread_num; + + if (totalThreads > tsSqlFileNum) { + totalThreads = tsSqlFileNum; + } + + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj)); + for (int32_t t = 0; t < totalThreads; ++t) { + pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = totalThreads; + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int t = 0; t < totalThreads; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int t = 0; t < totalThreads; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + +int taosDumpIn(struct arguments *arguments) { + assert(arguments->isDumpIn); + + TAOS *taos = NULL; + FILE *fp = NULL; + + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + 
fprintf(stderr, "failed to connect to TDengine server\n"); + return -1; + } + + taosGetDirectoryFileList(arguments->inpath); + + if (tsDbSqlFile[0] != 0) { + fp = taosOpenDumpInFile(tsDbSqlFile); + if (NULL == fp) { + fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); + return -1; + } + fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile); + + taosLoadFileCharset(fp, tsfCharset); + + taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); + } + + taosStartDumpInWorkThreads(arguments); + + taos_close(taos); + taosFreeSQLFiles(); + return 0; +} + + diff --git a/src/kit/taosmigrate/taosmigrate.c b/src/kit/taosmigrate/taosmigrate.c index b7bf6fc1ba..b80ca44a10 100644 --- a/src/kit/taosmigrate/taosmigrate.c +++ b/src/kit/taosmigrate/taosmigrate.c @@ -40,7 +40,7 @@ struct arguments { static error_t parse_opt(int key, char *arg, struct argp_state *state) { struct arguments *arguments = state->input; switch (key) { - case 'w': + case 'r': arguments->dataDir = arg; break; case 'd': @@ -51,6 +51,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { break; case 'f': arguments->fqdn = arg; + break; case 'g': arguments->dnodeGroups = arg; break; diff --git a/src/kit/taosmigrate/taosmigrateMnodeWal.c b/src/kit/taosmigrate/taosmigrateMnodeWal.c index 6315ff99f7..28e2b7772b 100644 --- a/src/kit/taosmigrate/taosmigrateMnodeWal.c +++ b/src/kit/taosmigrate/taosmigrateMnodeWal.c @@ -96,6 +96,7 @@ void walModWalFile(char* walfile) { if (wfd < 0) { printf("wal:%s, failed to open(%s)\n", newWalFile, strerror(errno)); free(buffer); + close(rfd); return ; } @@ -116,6 +117,11 @@ void walModWalFile(char* walfile) { break; } + if (pHead->len >= 1024000 - sizeof(SWalHead)) { + printf("wal:%s, SWalHead.len(%d) overflow, skip the rest of file\n", walfile, pHead->len); + break; + } + ret = read(rfd, pHead->cont, pHead->len); if ( ret != pHead->len) { printf("wal:%s, failed to read body, skip, len:%d ret:%d\n", walfile, pHead->len, 
ret); diff --git a/src/kit/taosmigrate/taosmigrateVnodeCfg.c b/src/kit/taosmigrate/taosmigrateVnodeCfg.c index 1cb2fee353..b925fb10aa 100644 --- a/src/kit/taosmigrate/taosmigrateVnodeCfg.c +++ b/src/kit/taosmigrate/taosmigrateVnodeCfg.c @@ -99,6 +99,8 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile) goto PARSE_OVER; } + content[maxLen] = (char)0; + root = cJSON_Parse(content); if (root == NULL) { printf("failed to json parse %s, invalid json format\n", cfgFile); diff --git a/src/mnode/inc/mnodeMnode.h b/src/mnode/inc/mnodeMnode.h index c75deac594..1060907234 100644 --- a/src/mnode/inc/mnodeMnode.h +++ b/src/mnode/inc/mnodeMnode.h @@ -44,6 +44,7 @@ void mnodeDecMnodeRef(struct SMnodeObj *pMnode); char * mnodeGetMnodeRoleStr(); void mnodeGetMnodeIpSetForPeer(SRpcIpSet *ipSet); void mnodeGetMnodeIpSetForShell(SRpcIpSet *ipSet); +char* mnodeGetMnodeMasterEp(); void mnodeGetMnodeInfos(void *mnodes); void mnodeUpdateMnodeIpSet(); diff --git a/src/mnode/inc/mnodeSdb.h b/src/mnode/inc/mnodeSdb.h index ca2fffe24c..eec6d45e23 100644 --- a/src/mnode/inc/mnodeSdb.h +++ b/src/mnode/inc/mnodeSdb.h @@ -53,6 +53,7 @@ typedef struct { void * rowData; int32_t rowSize; int32_t retCode; // for callback in sdb queue + int32_t processedCount; // for sync fwd callback int32_t (*cb)(struct SMnodeMsg *pMsg, int32_t code); struct SMnodeMsg *pMsg; } SSdbOper; diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c index d0c0f01d63..2d6e1ae007 100644 --- a/src/mnode/src/mnodeAcct.c +++ b/src/mnode/src/mnodeAcct.c @@ -128,6 +128,7 @@ int32_t mnodeInitAccts() { void mnodeCleanupAccts() { acctCleanUp(); sdbCloseTable(tsAcctSdb); + tsAcctSdb = NULL; } void *mnodeGetAcct(char *name) { diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 69821e3483..c13cd7c95c 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -459,6 +459,7 @@ void mnodeMoveVgroupToHead(SVgObj *pVgroup) { void mnodeCleanupDbs() { sdbCloseTable(tsDbSdb); + tsDbSdb = NULL; 
} static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { @@ -964,6 +965,11 @@ static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg) { mError("db:%s, failed to alter, invalid db", pAlter->db); return TSDB_CODE_MND_INVALID_DB; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pAlter->db, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } return mnodeAlterDb(pMsg->pDb, pAlter, pMsg); } diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index c1b8256a06..29272fbd4f 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -88,13 +88,13 @@ static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) { } static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) { - SDnodeObj *pDnode = pOper->pObj; - SDnodeObj *pSaved = mnodeGetDnode(pDnode->dnodeId); - if (pSaved != NULL && pDnode != pSaved) { - memcpy(pSaved, pDnode, pOper->rowSize); - free(pDnode); - mnodeDecDnodeRef(pSaved); + SDnodeObj *pNew = pOper->pObj; + SDnodeObj *pDnode = mnodeGetDnode(pNew->dnodeId); + if (pDnode != NULL && pNew != pDnode) { + memcpy(pDnode, pNew, pOper->rowSize); + free(pNew); } + mnodeDecDnodeRef(pDnode); return TSDB_CODE_SUCCESS; } @@ -176,6 +176,7 @@ int32_t mnodeInitDnodes() { void mnodeCleanupDnodes() { sdbCloseTable(tsDnodeSdb); + tsDnodeSdb = NULL; } void *mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode) { @@ -334,7 +335,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { if (pStatus->dnodeId == 0) { mDebug("dnode:%d %s, first access", pDnode->dnodeId, pDnode->dnodeEp); } else { - //mDebug("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); + mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); } int32_t openVnodes = htons(pStatus->openVnodes); @@ -468,7 +469,7 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { } mnodeDecDnodeRef(pDnode); - if (strcmp(pDnode->dnodeEp, 
dnodeGetMnodeMasterEp()) == 0) { + if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) { mError("dnode:%d, can't drop dnode:%s which is master", pDnode->dnodeId, ep); return TSDB_CODE_MND_NO_REMOVE_MASTER; } diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index ba0089c61e..db7c35fe2d 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -121,9 +121,9 @@ void mnodeCleanupSystem() { dnodeFreeMnodeWqueue(); dnodeFreeMnodeRqueue(); dnodeFreeMnodePqueue(); - mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); mnodeCleanupTimer(); - + mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); + mInfo("mnode is cleaned up"); } diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index 4d785dd062..f74de2b325 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -165,6 +165,7 @@ int32_t mnodeInitMnodes() { void mnodeCleanupMnodes() { sdbCloseTable(tsMnodeSdb); + tsMnodeSdb = NULL; mnodeMnodeDestroyLock(); } @@ -267,6 +268,10 @@ void mnodeGetMnodeIpSetForShell(SRpcIpSet *ipSet) { mnodeMnodeUnLock(); } +char* mnodeGetMnodeMasterEp() { + return tsMnodeInfos.nodeInfos[tsMnodeInfos.inUse].nodeEp; +} + void mnodeGetMnodeInfos(void *mnodeInfos) { mnodeMnodeRdLock(); *(SDMMnodeInfos *)mnodeInfos = tsMnodeInfos; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 761dce6720..c4a2d12e3d 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -72,8 +72,6 @@ typedef struct { void * sync; void * wal; SSyncCfg cfg; - sem_t sem; - int32_t code; int32_t numOfTables; SSdbTable *tableList[SDB_TABLE_MAX]; pthread_mutex_t mutex; @@ -201,7 +199,7 @@ static void sdbRestoreTables() { sdbDebug("table:%s, is restored, numOfRows:%" PRId64, pTable->tableName, pTable->numOfRows); } - sdbInfo("sdb is restored, version:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables); + sdbInfo("sdb is 
restored, ver:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables); } void sdbUpdateMnodeRoles() { @@ -244,27 +242,36 @@ static void sdbNotifyRole(void *ahandle, int8_t role) { sdbUpdateMnodeRoles(); } +FORCE_INLINE static void sdbConfirmForward(void *ahandle, void *param, int32_t code) { - tsSdbObj.code = code; - sem_post(&tsSdbObj.sem); - sdbDebug("forward request confirmed, version:%" PRIu64 ", result:%s", (int64_t)param, tstrerror(code)); -} + assert(param); + SSdbOper * pOper = param; + SMnodeMsg *pMsg = pOper->pMsg; + if (code <= 0) pOper->retCode = code; - static int32_t sdbForwardToPeer(SWalHead *pHead) { - if (tsSdbObj.sync == NULL) return TSDB_CODE_SUCCESS; + int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1); + if (processedCount <= 1) { + if (pMsg != NULL) { + sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d", pMsg->rpcMsg.ahandle, pMsg, processedCount); + } + return; + } - int32_t code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version, TAOS_QTYPE_RPC); - if (code > 0) { - sdbDebug("forward request is sent, version:%" PRIu64 ", code:%d", pHead->version, code); - sem_wait(&tsSdbObj.sem); - return tsSdbObj.code; - } - return code; + if (pMsg != NULL) { + sdbDebug("app:%p:%p, is confirmed and will do callback func", pMsg->rpcMsg.ahandle, pMsg); + } + + if (pOper->cb != NULL) { + pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode); + } + + dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode); + taosFreeQitem(pOper); } void sdbUpdateSync() { SSyncCfg syncCfg = {0}; - int32_t index = 0; + int32_t index = 0; SDMMnodeInfos *mnodes = dnodeGetMnodeInfos(); for (int32_t i = 0; i < mnodes->nodeNum; ++i) { @@ -298,7 +305,7 @@ void sdbUpdateSync() { } syncCfg.replica = index; - syncCfg.quorum = (syncCfg.replica == 1) ? 1:2; + syncCfg.quorum = (syncCfg.replica == 1) ? 
1 : 2; bool hasThisDnode = false; for (int32_t i = 0; i < syncCfg.replica; ++i) { @@ -325,10 +332,10 @@ void sdbUpdateSync() { syncInfo.getWalInfo = sdbGetWalInfo; syncInfo.getFileInfo = sdbGetFileInfo; syncInfo.writeToCache = sdbWriteToQueue; - syncInfo.confirmForward = sdbConfirmForward; + syncInfo.confirmForward = sdbConfirmForward; syncInfo.notifyRole = sdbNotifyRole; tsSdbObj.cfg = syncCfg; - + if (tsSdbObj.sync) { syncReconfig(tsSdbObj.sync, &syncCfg); } else { @@ -339,7 +346,6 @@ void sdbUpdateSync() { int32_t sdbInit() { pthread_mutex_init(&tsSdbObj.mutex, NULL); - sem_init(&tsSdbObj.sem, 0, 0); if (sdbInitWriteWorker() != 0) { return -1; @@ -367,7 +373,7 @@ void sdbCleanUp() { tsSdbObj.status = SDB_STATUS_CLOSING; sdbCleanupWriteWorker(); - sdbDebug("sdb will be closed, version:%" PRId64, tsSdbObj.version); + sdbDebug("sdb will be closed, ver:%" PRId64, tsSdbObj.version); if (tsSdbObj.sync) { syncStop(tsSdbObj.sync); @@ -379,7 +385,6 @@ void sdbCleanUp() { tsSdbObj.wal = NULL; } - sem_destroy(&tsSdbObj.sem); pthread_mutex_destroy(&tsSdbObj.mutex); } @@ -466,8 +471,8 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { atomic_add_fetch_32(&pTable->autoIndex, 1); } - sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion()); + sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " ver:%" PRIu64 ", msg:%p", pTable->tableName, + sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion(), pOper->pMsg); (*pTable->insertFp)(pOper); return TSDB_CODE_SUCCESS; @@ -485,8 +490,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { taosHashRemove(pTable->iHandle, key, keySize); atomic_sub_fetch_32(&pTable->numOfRows, 1); - sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 " version:%" PRIu64, 
pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 " ver:%" PRIu64 ", msg:%p", pTable->tableName, + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion(), pOper->pMsg); int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1; *updateEnd = 1; @@ -496,8 +501,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { } static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) { - sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 " ver:%" PRIu64 ", msg:%p", pTable->tableName, + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion(), pOper->pMsg); (*pTable->updateFp)(pOper); return TSDB_CODE_SUCCESS; @@ -513,24 +518,22 @@ static int sdbWrite(void *param, void *data, int type) { assert(pTable != NULL); pthread_mutex_lock(&tsSdbObj.mutex); + if (pHead->version == 0) { - // assign version + // assign version tsSdbObj.version++; pHead->version = tsSdbObj.version; } else { // for data from WAL or forward, version may be smaller if (pHead->version <= tsSdbObj.version) { pthread_mutex_unlock(&tsSdbObj.mutex); - if (type == TAOS_QTYPE_FWD && tsSdbObj.sync != NULL) { - sdbDebug("forward request is received, version:%" PRIu64 " confirm it", pHead->version); - syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS); - } + sdbDebug("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64, + pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version); return TSDB_CODE_SUCCESS; } else if (pHead->version != tsSdbObj.version + 1) { pthread_mutex_unlock(&tsSdbObj.mutex); - 
sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64, - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, - tsSdbObj.version); + sdbError("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64, + pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version); return TSDB_CODE_MND_APP_ERROR; } else { tsSdbObj.version = pHead->version; @@ -542,28 +545,36 @@ static int sdbWrite(void *param, void *data, int type) { pthread_mutex_unlock(&tsSdbObj.mutex); return code; } - - code = sdbForwardToPeer(pHead); + pthread_mutex_unlock(&tsSdbObj.mutex); // from app, oper is created if (pOper != NULL) { - sdbTrace("record from app is disposed, table:%s action:%s record:%s version:%" PRIu64 " result:%s", - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, - tstrerror(code)); - return code; + // forward to peers + pOper->processedCount = 0; + int32_t syncCode = syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC); + if (syncCode <= 0) pOper->processedCount = 1; + + if (syncCode < 0) { + sdbError("table:%s, failed to forward request, result:%s action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, + tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + } else if (syncCode > 0) { + sdbDebug("table:%s, forward request is sent, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + } else { + sdbTrace("table:%s, no need to send fwd request, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + } + return syncCode; } + sdbDebug("table:%s, record from 
wal/fwd is disposed, action:%s record:%s ver:%" PRId64, pTable->tableName, + sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); + + // even it is WAL/FWD, it shall be called to update version in sync + syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC); + // from wal or forward msg, oper not created, should add into hash - if (tsSdbObj.sync != NULL) { - sdbTrace("record from wal forward is disposed, table:%s action:%s record:%s version:%" PRIu64 " confirm it", - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); - syncConfirmForward(tsSdbObj.sync, pHead->version, code); - } else { - sdbTrace("record from wal restore is disposed, table:%s action:%s record:%s version:%" PRIu64, pTable->tableName, - sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); - } - if (action == SDB_ACTION_INSERT) { SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; code = (*pTable->decodeFp)(&oper); @@ -627,7 +638,7 @@ int32_t sdbInsertRow(SSdbOper *pOper) { memcpy(pNewOper, pOper, sizeof(SSdbOper)); if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle, + sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); } @@ -677,7 +688,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) { memcpy(pNewOper, pOper, sizeof(SSdbOper)); if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle, + sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); } @@ -727,7 +738,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) { 
memcpy(pNewOper, pOper, sizeof(SSdbOper)); if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle, + sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); } @@ -943,20 +954,20 @@ static void *sdbWorkerFp(void *param) { taosGetQitem(tsSdbWriteQall, &type, &item); if (type == TAOS_QTYPE_RPC) { pOper = (SSdbOper *)item; + pOper->processedCount = 1; pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK; + if (pOper->pMsg != NULL) { + sdbDebug("app:%p:%p, table:%s record:%p:%s ver:%" PRIu64 ", will be processed in sdb queue", + pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj, + sdbGetKeyStr(pOper->table, pHead->cont), pHead->version); + } } else { pHead = (SWalHead *)item; pOper = NULL; } - if (pOper != NULL && pOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue", - pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj, - sdbGetKeyStr(pOper->table, pHead->cont), pHead->version); - } - int32_t code = sdbWrite(pOper, pHead, type); - if (pOper) pOper->retCode = code; + if (pOper && code <= 0) pOper->retCode = code; } walFsync(tsSdbObj.wal); @@ -965,25 +976,17 @@ static void *sdbWorkerFp(void *param) { taosResetQitems(tsSdbWriteQall); for (int32_t i = 0; i < numOfMsgs; ++i) { taosGetQitem(tsSdbWriteQall, &type, &item); + if (type == TAOS_QTYPE_RPC) { pOper = (SSdbOper *)item; - if (pOper != NULL && pOper->cb != NULL) { - sdbTrace("app:%p:%p, will do callback func, index:%d", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, i); - pOper->retCode = (*pOper->cb)(pOper->pMsg, pOper->retCode); - } - - if (pOper != NULL && pOper->pMsg != NULL) { - sdbTrace("app:%p:%p, msg is processed, result:%s", 
pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, - tstrerror(pOper->retCode)); - } - - if (pOper != NULL) { - sdbDecRef(pOper->table, pOper->pObj); - } - - dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode); + sdbDecRef(pOper->table, pOper->pObj); + sdbConfirmForward(NULL, pOper, pOper->retCode); + } else if (type == TAOS_QTYPE_FWD) { + syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS); + taosFreeQitem(item); + } else { + taosFreeQitem(item); } - taosFreeQitem(item); } } diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 97ffe83914..1b958e72e6 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -307,6 +307,11 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { code = TSDB_CODE_MND_INVALID_DB; goto connect_over; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } mnodeDecDbRef(pDb); } @@ -352,6 +357,11 @@ static int32_t mnodeProcessUseMsg(SMnodeMsg *pMsg) { if (pMsg->pDb == NULL) { code = TSDB_CODE_MND_INVALID_DB; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } return code; } @@ -403,4 +413,4 @@ void mnodeVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capa memmove(data + pShow->offset[i] * rows, data + pShow->offset[i] * capacity, pShow->bytes[i] * rows); } } -} \ No newline at end of file +} diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 47add8f7a3..523d7001c2 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -116,6 +116,11 @@ static int32_t mnodeChildTableActionInsert(SSdbOper *pOper) { mError("ctable:%s, vgId:%d not in db:%s", pTable->info.tableId, pVgroup->vgId, pVgroup->dbName); return TSDB_CODE_MND_INVALID_DB; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in 
dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } mnodeDecDbRef(pDb); SAcctObj *pAcct = mnodeGetAcct(pDb->acct); @@ -284,8 +289,8 @@ static int32_t mnodeChildTableActionRestored() { if (pTable == NULL) break; SDbObj *pDb = mnodeGetDbByTableId(pTable->info.tableId); - if (pDb == NULL) { - mError("ctable:%s, failed to get db, discard it", pTable->info.tableId); + if (pDb == NULL || pDb->status != TSDB_DB_STATUS_READY) { + mError("ctable:%s, failed to get db or db in dropping, discard it", pTable->info.tableId); SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; sdbDeleteRow(&desc); mnodeDecTableRef(pTable); @@ -376,6 +381,7 @@ static int32_t mnodeInitChildTables() { static void mnodeCleanupChildTables() { sdbCloseTable(tsChildTableSdb); + tsChildTableSdb = NULL; } static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { @@ -422,7 +428,7 @@ static int32_t mnodeSuperTableActionDestroy(SSdbOper *pOper) { static int32_t mnodeSuperTableActionInsert(SSdbOper *pOper) { SSuperTableObj *pStable = pOper->pObj; SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId); - if (pDb != NULL) { + if (pDb != NULL && pDb->status == TSDB_DB_STATUS_READY) { mnodeAddSuperTableIntoDb(pDb); } mnodeDecDbRef(pDb); @@ -554,6 +560,7 @@ static int32_t mnodeInitSuperTables() { static void mnodeCleanupSuperTables() { sdbCloseTable(tsSuperTableSdb); + tsSuperTableSdb = NULL; } int32_t mnodeInitTables() { @@ -683,10 +690,15 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreate->db); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { + if (pMsg->pDb == NULL) { mError("app:%p:%p, table:%s, failed to create, db not selected", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, 
status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId); if (pMsg->pTable != NULL && pMsg->retry == 0) { @@ -717,10 +729,15 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { SCMDropTableMsg *pDrop = pMsg->rpcMsg.pCont; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableId); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { - mError("app:%p:%p, table:%s, failed to drop table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + if (pMsg->pDb == NULL) { + mError("app:%p:%p, table:%s, failed to drop table, db not selected or db in dropping", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { mError("app:%p:%p, table:%s, failed to drop table, in monitor database", pMsg->rpcMsg.ahandle, pMsg, @@ -755,11 +772,16 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { pInfo->tableId, pMsg->rpcMsg.handle, pInfo->createFlag); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableId); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { + if (pMsg->pDb == NULL) { mError("app:%p:%p, table:%s, failed to get table meta, db not selected", pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pInfo->tableId); if (pMsg->pTable == NULL) { @@ -783,9 +805,15 @@ static int32_t 
mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) { SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; - if (pTable != NULL) { - mLInfo("app:%p:%p, stable:%s, is created in sdb, result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, - tstrerror(code)); + assert(pTable); + + if (code == TSDB_CODE_SUCCESS) { + mLInfo("stable:%s, is created in sdb", pTable->info.tableId); + } else { + mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + tstrerror(code)); + SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb}; + sdbDeleteRow(&desc); } return code; @@ -1201,6 +1229,11 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) { static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -1260,6 +1293,11 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } tstrncpy(prefix, pDb->name, 64); strcat(prefix, TS_PATH_DELIMITER); @@ -1561,10 +1599,16 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; assert(pTable); - mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); - - if (code != TSDB_CODE_SUCCESS) return code; + if (code == 
TSDB_CODE_SUCCESS) { + mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + pTable->sid, pTable->uid); + } else { + mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg, + pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); + SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb}; + sdbDeleteRow(&desc); + return code; + } SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable); @@ -2285,7 +2329,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { if (pTable == NULL) continue; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(tableId); - if (pMsg->pDb == NULL) { + if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { mnodeDecTableRef(pTable); continue; } @@ -2323,6 +2367,11 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -2371,6 +2420,11 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } int32_t numOfRows = 0; SChildTableObj *pTable = NULL; @@ -2462,10 +2516,15 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { pAlter->tableId, pMsg->rpcMsg.handle); if 
(pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableId); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { + if (pMsg->pDb == NULL) { mError("app:%p:%p, table:%s, failed to alter table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { mError("app:%p:%p, table:%s, failed to alter table, its log db", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); @@ -2525,6 +2584,11 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -2572,7 +2636,11 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; - + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } int32_t numOfRows = 0; SChildTableObj *pTable = NULL; diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c index fa2a2634d6..84f5d6aa58 100644 --- a/src/mnode/src/mnodeUser.c +++ b/src/mnode/src/mnodeUser.c @@ -154,6 +154,7 @@ int32_t mnodeInitUsers() { void mnodeCleanupUsers() { sdbCloseTable(tsUserSdb); + tsUserSdb = NULL; } SUserObj *mnodeGetUser(char *name) { diff --git a/src/mnode/src/mnodeVgroup.c 
b/src/mnode/src/mnodeVgroup.c index b763c62640..cddb9eaf8b 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -75,6 +75,11 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) { if (pDb == NULL) { return TSDB_CODE_MND_INVALID_DB; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } pVgroup->pDb = pDb; pVgroup->prev = NULL; @@ -171,6 +176,12 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { mnodeVgroupUpdateIdPool(pVgroup); + // reset vgid status on vgroup changed + mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId); + for (int32_t v = 0; v < pVgroup->numOfVnodes; ++v) { + pVgroup->vnodeGid[v].role = TAOS_SYNC_ROLE_UNSYNCED; + } + mnodeDecVgroupRef(pVgroup); mDebug("vgId:%d, is updated, numOfVnode:%d", pVgroup->vgId, pVgroup->numOfVnodes); @@ -302,6 +313,7 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { + mTrace("dnode:%d, receive status from dnode, vgId:%d status is %d", pVgroup->vgId, pDnode->dnodeId, pVgid->role); pVgid->role = pVload->role; if (pVload->role == TAOS_SYNC_ROLE_MASTER) { pVgroup->inUse = i; @@ -341,17 +353,23 @@ void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) { } static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) { + SVgObj *pVgroup = pMsg->pVgroup; + SDbObj *pDb = pMsg->pDb; + assert(pVgroup); + if (code != TSDB_CODE_SUCCESS) { - pMsg->pVgroup = NULL; + mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + tstrerror(code)); + SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb}; + sdbDeleteRow(&desc); return code; } - SVgObj *pVgroup = pMsg->pVgroup; - SDbObj *pDb = pMsg->pDb; - - mInfo("vgId:%d, is created in mnode, db:%s 
replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes); + mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + pDb->name, pVgroup->numOfVnodes); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { - mInfo("vgId:%d, index:%d, dnode:%d", pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId); + mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i, + pVgroup->vnodeGid[i].dnodeId); } mnodeIncVgroupRef(pVgroup); @@ -414,6 +432,7 @@ void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle) { void mnodeCleanupVgroups() { sdbCloseTable(tsVgroupSdb); + tsVgroupSdb = NULL; } int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { @@ -421,6 +440,11 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { if (pDb == NULL) { return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -509,6 +533,11 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } pVgroup = pDb->pHead; while (pVgroup != NULL) { @@ -683,9 +712,9 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (rpcMsg->ahandle == NULL) return; SMnodeMsg *mnodeMsg = rpcMsg->ahandle; - mnodeMsg->received++; + atomic_add_fetch_8(&mnodeMsg->received, 1); if (rpcMsg->code == TSDB_CODE_SUCCESS) { - mnodeMsg->successed++; + atomic_add_fetch_8(&mnodeMsg->successed, 1); } else { mnodeMsg->code = rpcMsg->code; } diff --git a/src/query/inc/qast.h b/src/query/inc/qast.h index 410b2ac9d2..918604f8c9 100644 --- a/src/query/inc/qast.h +++ b/src/query/inc/qast.h @@ -16,16 
+16,16 @@ #ifndef TDENGINE_TAST_H #define TDENGINE_TAST_H -#include #ifdef __cplusplus extern "C" { #endif -#include #include "os.h" #include "taosmsg.h" #include "taosdef.h" +#include "tskiplist.h" +#include "tbuffer.h" #include "tvariant.h" struct tExprNode; @@ -75,10 +75,6 @@ typedef struct tExprNode { }; } tExprNode; -void tSQLBinaryExprFromString(tExprNode **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len); - -void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len); - void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*)); void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param); @@ -86,12 +82,9 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, char *(*cb)(void *, const char*, int32_t)); -// todo refactor: remove it -void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res); - uint8_t getBinaryExprOptr(SSQLToken *pToken); -void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); +void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); tExprNode* exprTreeFromBinary(const void* data, size_t size); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bd8c9951e1..3d7f6f95a0 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -18,18 +18,18 @@ #include "qfill.h" #include "taosmsg.h" +#include "exception.h" #include "hash.h" #include "qExecutor.h" #include "qUtil.h" -#include "qast.h" #include "qresultBuf.h" #include "query.h" #include "queryLog.h" +#include "qast.h" +#include "tfile.h" #include "tlosertree.h" -#include "exception.h" #include "tscompression.h" #include "ttime.h" -#include "tfile.h" /** * check if the primary column is load by default, otherwise, the program will @@ -49,6 +49,8 @@ #define GET_COL_DATA_POS(query, 
index, step) ((query)->pos + (index) * (step)) #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) +#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} + /* get the qinfo struct address from the query struct address */ #define GET_COLUMN_BYTES(query, colidx) \ ((query)->colList[(query)->pSelectExpr[colidx].base.colInfo.colIndex].bytes) @@ -120,7 +122,6 @@ static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) { static void setQueryStatus(SQuery *pQuery, int8_t status); #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0) -static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; } // todo move to utility static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group); @@ -397,34 +398,38 @@ static bool hasNullValue(SQuery *pQuery, int32_t col, int32_t numOfCols, SDataSt } static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, char *pData, - int16_t bytes) { + int16_t bytes, bool masterscan) { SQuery *pQuery = pRuntimeEnv->pQuery; int32_t *p1 = (int32_t *) taosHashGet(pWindowResInfo->hashList, pData, bytes); if (p1 != NULL) { pWindowResInfo->curIndex = *p1; - } else { // more than the capacity, reallocate the resources - if (pWindowResInfo->size >= pWindowResInfo->capacity) { - int64_t newCap = pWindowResInfo->capacity * 2; + } else { + if (masterscan) { // more than the capacity, reallocate the resources + if (pWindowResInfo->size >= pWindowResInfo->capacity) { + int64_t newCap = pWindowResInfo->capacity * 2; - char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); - if (t != NULL) { - pWindowResInfo->pResult = (SWindowResult *)t; - memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity); - } else { - // todo + char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); + if (t != NULL) { + pWindowResInfo->pResult = (SWindowResult *)t; 
+ memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity); + } else { + // todo + } + + for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { + SPosInfo pos = {-1, -1}; + createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos); + } + pWindowResInfo->capacity = newCap; } - for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { - SPosInfo pos = {-1, -1}; - createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos); - } - pWindowResInfo->capacity = newCap; + // add a new result set for a new group + pWindowResInfo->curIndex = pWindowResInfo->size++; + taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); + } else { + return NULL; } - - // add a new result set for a new group - pWindowResInfo->curIndex = pWindowResInfo->size++; - taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); } return getWindowResult(pWindowResInfo, pWindowResInfo->curIndex); @@ -511,15 +516,19 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult } static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, int32_t sid, - STimeWindow *win) { + STimeWindow *win, bool masterscan, bool* newWind) { assert(win->skey <= win->ekey); SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, TSDB_KEYSIZE); + SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, + TSDB_KEYSIZE, masterscan); if (pWindowRes == NULL) { - return -1; + *newWind = false; + + return masterscan? 
-1:0; } + *newWind = true; // not assign result buffer yet, add new result buffer if (pWindowRes->pos.pageId == -1) { int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, sid, pRuntimeEnv->numOfRowsPerPage); @@ -563,7 +572,7 @@ static int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t sea */ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SWindowResInfo *pWindowResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!isIntervalQuery(pQuery))) { + if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!QUERY_IS_INTERVAL_QUERY(pQuery))) { return pWindowResInfo->size; } @@ -686,24 +695,26 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; - for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + if (IS_MASTER_SCAN(pRuntimeEnv) || pStatus->closed) { + for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].base.functionId; - pCtx[k].nStartQueryTimestamp = pWin->skey; - pCtx[k].size = forwardStep; - pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1); + pCtx[k].nStartQueryTimestamp = pWin->skey; + pCtx[k].size = forwardStep; + pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? 
offset : offset - (forwardStep - 1); - if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { - pCtx[k].ptsList = &tsBuf[offset]; - } + if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + pCtx[k].ptsList = &tsBuf[offset]; + } - // not a whole block involved in query processing, statistics data can not be used - if (forwardStep != numOfTotal) { - pCtx[k].preAggVals.isSet = false; - } + // not a whole block involved in query processing, statistics data can not be used + if (forwardStep != numOfTotal) { + pCtx[k].preAggVals.isSet = false; + } - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - aAggs[functionId].xFunction(&pCtx[k]); + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunction(&pCtx[k]); + } } } } @@ -713,12 +724,14 @@ static void doRowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStatus SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; - for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - pCtx[k].nStartQueryTimestamp = pWin->skey; + if (IS_MASTER_SCAN(pRuntimeEnv) || pStatus->closed) { + for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { + pCtx[k].nStartQueryTimestamp = pWin->skey; - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - aAggs[functionId].xFunctionF(&pCtx[k], offset); + int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + aAggs[functionId].xFunctionF(&pCtx[k], offset); + } } } } @@ -880,7 +893,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * SDataBlockInfo *pDataBlockInfo, SWindowResInfo *pWindowResInfo, __block_search_fn_t searchFn, SArray *pDataBlock) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; - + bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); + SQuery *pQuery = pRuntimeEnv->pQuery; TSKEY *tsCols = NULL; if (pDataBlock != 
NULL) { @@ -903,18 +917,21 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * int32_t offset = GET_COL_DATA_POS(pQuery, 0, step); TSKEY ts = tsCols[offset]; + bool hasTimeWindow = false; STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery); - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win) != TSDB_CODE_SUCCESS) { + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { tfree(sasArray); return; } - TSKEY ekey = reviseWindowEkey(pQuery, &win); - int32_t forwardStep = - getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true); + if (hasTimeWindow) { + TSKEY ekey = reviseWindowEkey(pQuery, &win); + int32_t forwardStep = + getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true); - SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); - doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, tsCols, pDataBlockInfo->rows); + SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); + doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, tsCols, pDataBlockInfo->rows); + } int32_t index = pWindowResInfo->curIndex; STimeWindow nextWin = win; @@ -926,14 +943,19 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * } // null data, failed to allocate more memory buffer - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin) != TSDB_CODE_SUCCESS) { + hasTimeWindow = false; + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { break; } - ekey = reviseWindowEkey(pQuery, &nextWin); - forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, startPos, ekey, 
searchFn, true); + if (!hasTimeWindow) { + continue; + } - pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); + TSKEY ekey = reviseWindowEkey(pQuery, &nextWin); + int32_t forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, startPos, ekey, searchFn, true); + + SWindowStatus* pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows); } @@ -983,7 +1005,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat } // assert(pRuntimeEnv->windowResInfo.hashList->size <= 2); - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes); + SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes, true); if (pWindowRes == NULL) { return -1; } @@ -1113,6 +1135,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pStatis, SDataBlockInfo *pDataBlockInfo, SWindowResInfo *pWindowResInfo, SArray *pDataBlock) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; STableQueryInfo* item = pQuery->current; @@ -1179,16 +1202,21 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } // interval window query - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { // decide the time window according to the primary timestamp int64_t ts = tsCols[offset]; STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery); - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win); + bool hasTimeWindow = false; + int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, 
&hasTimeWindow); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code continue; } + if (!hasTimeWindow) { + continue; + } + SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &win, offset); @@ -1208,12 +1236,15 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } // null data, failed to allocate more memory buffer - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin) != TSDB_CODE_SUCCESS) { + bool hasTimeWindow = false; + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { break; } - pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); - doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, offset); + if (hasTimeWindow) { + pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); + doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, offset); + } } pWindowResInfo->curIndex = index; @@ -1283,7 +1314,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl // interval query with limit applied int32_t numOfRes = 0; - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo); } else { numOfRes = getNumOfResult(pRuntimeEnv); @@ -1679,28 +1710,21 @@ static bool onlyQueryTags(SQuery* pQuery) { ///////////////////////////////////////////////////////////////////////////////////////////// -void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) { +void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) { assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime); - win->skey = 
taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision); + + /* + * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between + * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges. + */ if (keyFirst > (INT64_MAX - pQuery->intervalTime)) { - /* - * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between - * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges. - */ assert(keyLast - keyFirst < pQuery->intervalTime); - - realWin->skey = keyFirst; - realWin->ekey = keyLast; - win->ekey = INT64_MAX; return; + } else { + win->ekey = win->skey + pQuery->intervalTime - 1; } - - win->ekey = win->skey + pQuery->intervalTime - 1; - - realWin->skey = (win->skey < keyFirst)? keyFirst : win->skey; - realWin->ekey = (win->ekey < keyLast) ? win->ekey : keyLast; } static void setScanLimitationByResultBuffer(SQuery *pQuery) { @@ -1869,7 +1893,7 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) { if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { num = 128; - } else if (isIntervalQuery(pQuery)) { // time window query, allocate one page for each table + } else if (QUERY_IS_INTERVAL_QUERY(pQuery)) { // time window query, allocate one page for each table size_t s = pQInfo->tableqinfoGroupInfo.numOfTables; num = MAX(s, INITIAL_RESULT_ROWS_VALUE); } else { // for super table query, one page for each subset @@ -2019,7 +2043,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, r |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pQuery->window.skey, pQuery->window.ekey, colId); } - if (pRuntimeEnv->pTSBuf > 0 || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) { r |= BLK_DATA_ALL_NEEDED; } } @@ -2173,6 +2197,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB if (tmp == NULL) 
{ // todo handle the oom assert(0); } else { + memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (newSize - pRec->rows) * bytes); pQuery->sdata[i] = (tFilePage *)tmp; } @@ -2203,34 +2228,32 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->order.order); TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; + + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { return 0; } - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); + tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); // todo extract methods if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) { - STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER; + STimeWindow w = TSWINDOW_INITIALIZER; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; if (QUERY_IS_ASC_QUERY(pQuery)) { - getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &w); pWindowResInfo->startTime = w.skey; pWindowResInfo->prevSKey = w.skey; } else { // the start position of the first time window in the endpoint that spreads beyond the queried last timestamp - getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &w); pWindowResInfo->startTime = pQuery->window.skey; pWindowResInfo->prevSKey = w.skey; } - - if (pRuntimeEnv->pFillInfo != NULL) { - pRuntimeEnv->pFillInfo->start = w.skey; - } } // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block @@ -2260,10 +2283,10 @@ 
static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; +// int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; closeAllTimeWindow(&pRuntimeEnv->windowResInfo); - removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); +// removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window } else { assert(Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)); @@ -2880,6 +2903,9 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * SWITCH_ORDER(pTableQueryInfo->cur.order); pTableQueryInfo->cur.vgroupIndex = -1; + + // set the index at the end of time window + pTableQueryInfo->windowResInfo.curIndex = pTableQueryInfo->windowResInfo.size - 1; } static void disableFuncInReverseScanImpl(SQInfo* pQInfo, SWindowResInfo *pWindowResInfo, int32_t order) { @@ -2914,7 +2940,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { // group by normal columns and interval query on normal table SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; - if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order); } else { // for simple result of table query, for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor @@ -3089,7 +3115,7 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; bool toContinue = false; - if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->groupbyNormalCol || 
QUERY_IS_INTERVAL_QUERY(pQuery)) { // for each group result, call the finalize function for each column SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; @@ -3281,7 +3307,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { // for each group result, call the finalize function for each column SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; if (pRuntimeEnv->groupbyNormalCol) { @@ -3327,9 +3353,9 @@ static bool hasMainOutput(SQuery *pQuery) { } static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win, void* buf) { - SQuery* pQuery = pRuntimeEnv->pQuery; + SQuery *pQuery = pRuntimeEnv->pQuery; - STableQueryInfo *pTableQueryInfo = buf;//calloc(1, sizeof(STableQueryInfo)); + STableQueryInfo *pTableQueryInfo = buf; pTableQueryInfo->win = win; pTableQueryInfo->lastKey = win.skey; @@ -3337,16 +3363,14 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void pTableQueryInfo->pTable = pTable; pTableQueryInfo->cur.vgroupIndex = -1; - int32_t initialSize = 1; - int32_t initialThreshold = 1; - // set more initial size of interval/groupby query if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { - initialSize = 20; - initialThreshold = 100; + int32_t initialSize = 20; + int32_t initialThreshold = 100; + initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); + } else { // in other aggregate query, do not initialize the windowResInfo } - initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); return pTableQueryInfo; } @@ -3388,7 +3412,8 @@ void setExecutionContext(SQInfo *pQInfo, 
int32_t groupIndex, TSKEY nextKey) { } int32_t GROUPRESULTID = 1; - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, sizeof(groupIndex)); + SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, + sizeof(groupIndex), true); if (pWindowRes == NULL) { return; } @@ -3521,12 +3546,12 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { * In ascending query, key is the first qualified timestamp. However, in the descending order query, additional * operations involve. */ - STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER; + STimeWindow w = TSWINDOW_INITIALIZER; SWindowResInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo; TSKEY sk = MIN(win.skey, win.ekey); TSKEY ek = MAX(win.skey, win.ekey); - getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &realWin, &w); + getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &w); pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { @@ -3570,7 +3595,7 @@ static int32_t getNumOfSubset(SQInfo *pQInfo) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; int32_t totalSubset = 0; - if (pQInfo->runtimeEnv.groupbyNormalCol || (isIntervalQuery(pQuery))) { + if (pQInfo->runtimeEnv.groupbyNormalCol || (QUERY_IS_INTERVAL_QUERY(pQuery))) { totalSubset = numOfClosedTimeWindow(&pQInfo->runtimeEnv.windowResInfo); } else { totalSubset = GET_NUM_OF_TABLEGROUP(pQInfo); @@ -3735,7 +3760,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { } else { // there are results waiting for returned to client. 
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && - (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) && + (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) && (pRuntimeEnv->windowResInfo.size > 0)) { return true; } @@ -3858,7 +3883,6 @@ static void queryCostStatis(SQInfo *pQInfo) { // double total = pSummary->fileTimeUs + pSummary->cacheTimeUs; // double io = pSummary->loadCompInfoUs + pSummary->loadBlocksUs + pSummary->loadFieldUs; - // todo add the intermediate result save cost!! // double computing = total - io; // // qDebug( @@ -3918,12 +3942,13 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { STableQueryInfo* pTableQueryInfo = pQuery->current; TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle; + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { return; } - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); + tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); if (pQuery->limit.offset > blockInfo.rows) { pQuery->limit.offset -= blockInfo.rows; @@ -3955,22 +3980,23 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { */ assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL); - STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER; + STimeWindow w = TSWINDOW_INITIALIZER; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; STableQueryInfo *pTableQueryInfo = pQuery->current; + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle); + tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo); if (QUERY_IS_ASC_QUERY(pQuery)) { if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { - getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w); + 
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &w); pWindowResInfo->startTime = w.skey; pWindowResInfo->prevSKey = w.skey; } } else { - getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &w); pWindowResInfo->startTime = pQuery->window.skey; pWindowResInfo->prevSKey = w.skey; @@ -4067,7 +4093,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { return; } - if (isSTableQuery && (!isIntervalQuery(pQuery)) && (!isFixedOutputQuery(pQuery))) { + if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pQuery))) { return; } @@ -4081,7 +4107,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { if (!isSTableQuery && (pQInfo->tableqinfoGroupInfo.numOfTables == 1) && (cond.order == TSDB_ORDER_ASC) - && (!isIntervalQuery(pQuery)) + && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) && (!isFixedOutputQuery(pQuery)) ) { @@ -4174,7 +4200,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 512, 4096, type); } - } else if (pRuntimeEnv->groupbyNormalCol || isIntervalQuery(pQuery)) { + } else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { int32_t rows = getInitialPageNum(pQInfo); code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo); if (code != TSDB_CODE_SUCCESS) { @@ -4193,7 +4219,13 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { SFillColInfo* pColInfo = taosCreateFillColInfo(pQuery); - pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, 0, 0, pQuery->rec.capacity, 
pQuery->numOfOutput, + STimeWindow w = TSWINDOW_INITIALIZER; + + TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey); + TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey); + getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w); + + pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, pQuery->rec.capacity, pQuery->numOfOutput, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision, pQuery->fillType, pColInfo); } @@ -4225,13 +4257,15 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { int64_t st = taosGetTimestampMs(); TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; + while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (isQueryKilled(pQInfo)) { break; } - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); + tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); STableQueryInfo **pTableQueryInfo = (STableQueryInfo**) taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid)); if(pTableQueryInfo == NULL) { break; @@ -4244,7 +4278,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); if (!pRuntimeEnv->groupbyNormalCol) { - if (!isIntervalQuery(pQuery)) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 
1:-1; setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step); } else { // interval query @@ -4653,9 +4687,9 @@ static void doRestoreContext(SQInfo *pQInfo) { static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); +// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo); for (int32_t i = 0; i < numOfGroup; ++i) { SArray *group = GET_TABLEGROUP(pQInfo, i); @@ -4664,7 +4698,7 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) { for (int32_t j = 0; j < num; ++j) { STableQueryInfo* item = taosArrayGetP(group, j); closeAllTimeWindow(&item->windowResInfo); - removeRedundantWindow(&item->windowResInfo, item->lastKey - step, step); +// removeRedundantWindow(&item->windowResInfo, item->lastKey - step, step); } } } else { // close results for group result @@ -4681,7 +4715,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { * if the groupIndex > 0, the query process must be completed yet, we only need to * copy the data into output buffer */ - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { copyResToQueryResultBuf(pQInfo, pQuery); #ifdef _DEBUG_VIEW displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num); @@ -4716,7 +4750,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { el = scanMultiTableDataBlocks(pQInfo); qDebug("QInfo:%p reversed scan completed, elapsed time: %" PRId64 "ms", pQInfo, el); - doCloseAllTimeWindowAfterScan(pQInfo); +// doCloseAllTimeWindowAfterScan(pQInfo); doRestoreContext(pQInfo); } else { qDebug("QInfo:%p no need to do reversed scan, query completed", pQInfo); @@ -4890,7 +4924,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { while (1) { tableIntervalProcessImpl(pRuntimeEnv, 
newStartKey); - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { pQInfo->groupIndex = 0; // always start from 0 pQuery->rec.rows = 0; copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); @@ -5788,7 +5822,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ qDebug("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->window.skey, pQuery->window.ekey, pQuery->order.order); setQueryStatus(pQuery, QUERY_COMPLETED); - + pQInfo->tableqinfoGroupInfo.numOfTables = 0; sem_post(&pQInfo->dataReady); return TSDB_CODE_SUCCESS; } @@ -6460,7 +6494,7 @@ void qSetQueryMgmtClosed(void* pQMgmt) { pQueryMgmt->closed = true; pthread_mutex_unlock(&pQueryMgmt->lock); - taosCacheEmpty(pQueryMgmt->qinfoPool, true); + taosCacheRefresh(pQueryMgmt->qinfoPool, freeqinfoFn); } void qCleanupQueryMgmt(void* pQMgmt) { @@ -6483,11 +6517,13 @@ void qCleanupQueryMgmt(void* pQMgmt) { qDebug("vgId:%d querymgmt cleanup completed", vgId); } -void** qRegisterQInfo(void* pMgmt, void* qInfo) { +void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { if (pMgmt == NULL) { return NULL; } + const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2; + SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { return NULL; @@ -6499,21 +6535,23 @@ void** qRegisterQInfo(void* pMgmt, void* qInfo) { return NULL; } else { - void** handle = taosCachePut(pQueryMgmt->qinfoPool, qInfo, POINTER_BYTES, &qInfo, POINTER_BYTES, tsShellActivityTimer*2); + uint64_t handleVal = (uint64_t) qInfo; + + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(int64_t), &qInfo, POINTER_BYTES, DEFAULT_QHANDLE_LIFE_SPAN); pthread_mutex_unlock(&pQueryMgmt->lock); return handle; } } -void** qAcquireQInfo(void* pMgmt, void** key) { +void** qAcquireQInfo(void* pMgmt, uint64_t key) { SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) { return NULL; } - void** handle = 
taosCacheAcquireByKey(pQueryMgmt->qinfoPool, key, POINTER_BYTES); + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(uint64_t)); if (handle == NULL || *handle == NULL) { return NULL; } else { diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 3b2b2bfff0..03574388b7 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -190,8 +190,7 @@ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_ } // get the result order - int32_t resultOrder = (pWindowResInfo->pResult[0].window.skey < pWindowResInfo->pResult[1].window.skey)? - TSDB_ORDER_ASC:TSDB_ORDER_DESC; + int32_t resultOrder = (pWindowResInfo->pResult[0].window.skey < pWindowResInfo->pResult[1].window.skey)? 1:-1; if (order != resultOrder) { return; diff --git a/src/query/src/qast.c b/src/query/src/qast.c index 721cd8ae5a..ffd339f111 100644 --- a/src/query/src/qast.c +++ b/src/query/src/qast.c @@ -13,25 +13,26 @@ * along with this program. If not, see . */ + #include "os.h" -#include "tulog.h" -#include "tutil.h" -#include "tbuffer.h" + +#include "tname.h" #include "qast.h" -#include "tcompare.h" +#include "tsdb.h" +#include "exception.h" #include "qsqlparser.h" #include "qsyntaxtreefunction.h" #include "taosdef.h" #include "taosmsg.h" +#include "tarray.h" +#include "tbuffer.h" +#include "tcompare.h" +#include "tskiplist.h" #include "tsqlfunction.h" #include "tstoken.h" #include "ttokendef.h" -#include "tschemautil.h" -#include "tarray.h" -#include "tskiplist.h" -#include "queryLog.h" -#include "tsdbMain.h" -#include "exception.h" +#include "tulog.h" +#include "tutil.h" /* * @@ -327,104 +328,6 @@ static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *st } } -void tSQLBinaryExprFromString(tExprNode **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len) { - *pExpr = NULL; - - if (len <= 0 || src == NULL || pSchema == NULL || numOfCols <= 0) { - return; - } - - int32_t pos = 0; - - *pExpr = 
createSyntaxTree(pSchema, numOfCols, src, &pos); - if (*pExpr != NULL) { - assert((*pExpr)->nodeType == TSQL_NODE_EXPR); - } -} - -int32_t tSQLBinaryExprToStringImpl(tExprNode *pNode, char *dst, uint8_t type) { - int32_t len = 0; - if (type == TSQL_NODE_EXPR) { - *dst = '('; - tSQLBinaryExprToString(pNode, dst + 1, &len); - len += 2; - *(dst + len - 1) = ')'; - } else if (type == TSQL_NODE_COL) { - len = sprintf(dst, "%s", pNode->pSchema->name); - } else { - len = tVariantToString(pNode->pVal, dst); - } - return len; -} - -// TODO REFACTOR WITH SQL PARSER -static char *tSQLOptrToString(uint8_t optr, char *dst) { - switch (optr) { - case TSDB_RELATION_LESS: { - *dst = '<'; - dst += 1; - break; - } - case TSDB_RELATION_LESS_EQUAL: { - *dst = '<'; - *(dst + 1) = '='; - dst += 2; - break; - } - case TSDB_RELATION_EQUAL: { - *dst = '='; - dst += 1; - break; - } - case TSDB_RELATION_GREATER: { - *dst = '>'; - dst += 1; - break; - } - case TSDB_RELATION_GREATER_EQUAL: { - *dst = '>'; - *(dst + 1) = '='; - dst += 2; - break; - } - case TSDB_RELATION_NOT_EQUAL: { - *dst = '<'; - *(dst + 1) = '>'; - dst += 2; - break; - } - case TSDB_RELATION_OR: { - memcpy(dst, "or", 2); - dst += 2; - break; - } - case TSDB_RELATION_AND: { - memcpy(dst, "and", 3); - dst += 3; - break; - } - default:; - } - return dst; -} - -void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len) { - if (pExpr == NULL) { - *dst = 0; - *len = 0; - return; - } - - int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->_node.pLeft, dst, pExpr->_node.pLeft->nodeType); - dst += lhs; - *len = lhs; - - char *start = tSQLOptrToString(pExpr->_node.optr, dst); - *len += (start - dst); - - *len += tSQLBinaryExprToStringImpl(pExpr->_node.pRight, start, pExpr->_node.pRight->nodeType); -} - static void UNUSED_FUNC destroySyntaxTree(tExprNode *pNode) { tExprNodeDestroy(pNode, NULL); } void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) { @@ -773,8 +676,7 @@ static void tQueryIndexlessColumn(SSkipList* 
pSkipList, tQueryInfo* pQueryInfo, SSkipListNode *pNode = tSkipListIterGet(iter); char * pData = SL_GET_NODE_DATA(pNode); - // todo refactor: - tstr *name = (*(STable **)pData)->name; + tstr *name = (tstr*) tsdbGetTableName(*(void**) pData); // todo speed up by using hash if (pQueryInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) { if (pQueryInfo->optr == TSDB_RELATION_IN) { @@ -976,27 +878,27 @@ void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, free(pRightOutput); } -void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) { - if (pExprs == NULL) { - return; - } - - tExprNode *pLeft = pExprs->_node.pLeft; - tExprNode *pRight = pExprs->_node.pRight; - - // recursive traverse left child branch - if (pLeft->nodeType == TSQL_NODE_EXPR) { - tSQLBinaryExprTrv(pLeft, res); - } else if (pLeft->nodeType == TSQL_NODE_COL) { - taosArrayPush(res, &pLeft->pSchema->colId); - } - - if (pRight->nodeType == TSQL_NODE_EXPR) { - tSQLBinaryExprTrv(pRight, res); - } else if (pRight->nodeType == TSQL_NODE_COL) { - taosArrayPush(res, &pRight->pSchema->colId); - } -} +//void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) { +// if (pExprs == NULL) { +// return; +// } +// +// tExprNode *pLeft = pExprs->_node.pLeft; +// tExprNode *pRight = pExprs->_node.pRight; +// +// // recursive traverse left child branch +// if (pLeft->nodeType == TSQL_NODE_EXPR) { +// tSQLBinaryExprTrv(pLeft, res); +// } else if (pLeft->nodeType == TSQL_NODE_COL) { +// taosArrayPush(res, &pLeft->pSchema->colId); +// } +// +// if (pRight->nodeType == TSQL_NODE_EXPR) { +// tSQLBinaryExprTrv(pRight, res); +// } else if (pRight->nodeType == TSQL_NODE_COL) { +// taosArrayPush(res, &pRight->pSchema->colId); +// } +//} static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) { tbufWriteUint8(bw, expr->nodeType); diff --git a/src/query/src/qfill.c b/src/query/src/qfill.c index eebe9a976b..65951a5b9e 100644 --- a/src/query/src/qfill.c +++ b/src/query/src/qfill.c @@ -174,7 +174,7 @@ int32_t 
taosNumOfRemainRows(SFillInfo* pFillInfo) { return 0; } - return FILL_IS_ASC_FILL(pFillInfo) ? (pFillInfo->numOfRows - pFillInfo->rowIdx) : pFillInfo->rowIdx + 1; + return pFillInfo->numOfRows - pFillInfo->rowIdx; } // todo: refactor diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp index 15eb780021..df27087216 100644 --- a/src/query/tests/astTest.cpp +++ b/src/query/tests/astTest.cpp @@ -1,11 +1,10 @@ #include -#include #include #include #include -#include "qast.h" #include "taosmsg.h" +#include "qast.h" #include "tsdb.h" #include "tskiplist.h" @@ -24,8 +23,6 @@ static void initSchema_binary(SSchema *schema, int32_t numOfCols); static SSkipList *createSkipList(SSchema *pSchema, int32_t numOfTags); static SSkipList *createSkipList_binary(SSchema *pSchema, int32_t numOfTags); -static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *expectedVal); - static void dropMeter(SSkipList *pSkipList); static void Right2LeftTest(SSchema *schema, int32_t numOfCols, SSkipList *pSkipList); @@ -239,44 +236,45 @@ static void initSchema(SSchema *schema, int32_t numOfCols) { // return pSkipList; //} -static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *pResult) { - tExprNode *pExpr = NULL; - tSQLBinaryExprFromString(&pExpr, schema, numOfCols, sql, strlen(sql)); - - char str[512] = {0}; - int32_t len = 0; - if (pExpr == NULL) { - printf("-----error in parse syntax:%s\n\n", sql); - assert(pResult == NULL); - return; - } - - tSQLBinaryExprToString(pExpr, str, &len); - printf("expr is: %s\n", str); - - SArray *result = NULL; - // tExprTreeTraverse(pExpr, pSkipList, result, SSkipListNodeFilterCallback, &result); - // printf("the result is:%lld\n", result.num); - // - // bool findResult = false; - // for (int32_t i = 0; i < result.num; ++i) { - // STabObj *pm = (STabObj *)result.pRes[i]; - // printf("meterid:%s,\t", pm->meterId); - // - // for (int32_t j = 0; j < 
pResult->numOfResult; ++j) { - // if (strcmp(pm->meterId, pResult->resultName[j]) == 0) { - // findResult = true; - // break; - // } - // } - // assert(findResult == true); - // findResult = false; - // } - - printf("\n\n"); - tExprTreeDestroy(&pExpr, NULL); -} +//static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *pResult) { +// tExprNode *pExpr = NULL; +// tSQLBinaryExprFromString(&pExpr, schema, numOfCols, sql, strlen(sql)); +// +// char str[512] = {0}; +// int32_t len = 0; +// if (pExpr == NULL) { +// printf("-----error in parse syntax:%s\n\n", sql); +// assert(pResult == NULL); +// return; +// } +// +// tSQLBinaryExprToString(pExpr, str, &len); +// printf("expr is: %s\n", str); +// +// SArray *result = NULL; +// // tExprTreeTraverse(pExpr, pSkipList, result, SSkipListNodeFilterCallback, &result); +// // printf("the result is:%lld\n", result.num); +// // +// // bool findResult = false; +// // for (int32_t i = 0; i < result.num; ++i) { +// // STabObj *pm = (STabObj *)result.pRes[i]; +// // printf("meterid:%s,\t", pm->meterId); +// // +// // for (int32_t j = 0; j < pResult->numOfResult; ++j) { +// // if (strcmp(pm->meterId, pResult->resultName[j]) == 0) { +// // findResult = true; +// // break; +// // } +// // } +// // assert(findResult == true); +// // findResult = false; +// // } +// +// printf("\n\n"); +// tExprTreeDestroy(&pExpr, NULL); +//} +#if 0 static void Left2RightTest(SSchema *schema, int32_t numOfCols, SSkipList *pSkipList) { char str[256] = {0}; @@ -632,4 +630,5 @@ void exprSerializeTest2() { } // namespace TEST(testCase, astTest) { // exprSerializeTest2(); -} \ No newline at end of file +} +#endif \ No newline at end of file diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index e51b54e299..f8dbbedb11 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -156,6 +156,7 @@ int main(int argc, char *argv[]) { } tInfo("client is initialized"); + tInfo("threads:%d msgSize:%d 
requests:%d", appThreads, msgSize, numOfReqs); gettimeofday(&systemTime, NULL); startTime = systemTime.tv_sec*1000000 + systemTime.tv_usec; diff --git a/src/rpc/test/rserver.c b/src/rpc/test/rserver.c index 1ac9409a57..d06e9df64b 100644 --- a/src/rpc/test/rserver.c +++ b/src/rpc/test/rserver.c @@ -24,23 +24,21 @@ int msgSize = 128; int commit = 0; int dataFd = -1; void *qhandle = NULL; +void *qset = NULL; void processShellMsg() { static int num = 0; taos_qall qall; SRpcMsg *pRpcMsg, rpcMsg; int type; + void *pvnode; qall = taosAllocateQall(); while (1) { - int numOfMsgs = taosReadAllQitems(qhandle, qall); - if (numOfMsgs <= 0) { - usleep(100); - continue; - } - + int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode); tDebug("%d shell msgs are received", numOfMsgs); + if (numOfMsgs <= 0) break; for (int i=0; iconfig.tsdbId diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 9ebd63ba7e..84c1c8e7d1 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -192,11 +192,6 @@ char *tsdbGetTableName(void* pTable) { } } -STableId tsdbGetTableId(void *pTable) { - assert(pTable); - return ((STable*)pTable)->tableId; -} - STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) { if (pMsg == NULL) return NULL; diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index 9e87a1f587..326da07a36 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -318,7 +318,7 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) { ASSERT(pCompBlock->last); if (pCompBlock->numOfSubBlocks > 1) { - if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1)) < 0) return -1; + if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1), NULL) < 0) return -1; ASSERT(pHelper->pDataCols[0]->numOfRows > 0 && pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock); if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.nLastF), pHelper->pDataCols[0], pHelper->pDataCols[0]->numOfRows, 
&compBlock, true, true) < 0) @@ -577,11 +577,12 @@ void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols) } } -int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, int16_t *colIds, int numOfColIds) { +int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo, int16_t *colIds, int numOfColIds) { ASSERT(pCompBlock->numOfSubBlocks >= 1); // Must be super block int numOfSubBlocks = pCompBlock->numOfSubBlocks; - if (numOfSubBlocks > 1) pCompBlock = (SCompBlock *)POINTER_SHIFT(pHelper->pCompInfo, pCompBlock->offset); + if (numOfSubBlocks > 1) + pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset); tdResetDataCols(pHelper->pDataCols[0]); if (tsdbLoadBlockDataColsImpl(pHelper, pCompBlock, pHelper->pDataCols[0], colIds, numOfColIds) < 0) goto _err; @@ -598,10 +599,10 @@ _err: return -1; } -int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock) { - +int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo) { int numOfSubBlock = pCompBlock->numOfSubBlocks; - if (numOfSubBlock > 1) pCompBlock = (SCompBlock *)POINTER_SHIFT(pHelper->pCompInfo, pCompBlock->offset); + if (numOfSubBlock > 1) + pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? 
pHelper->pCompInfo : pCompInfo, pCompBlock->offset); tdResetDataCols(pHelper->pDataCols[0]); if (tsdbLoadBlockDataImpl(pHelper, pCompBlock, pHelper->pDataCols[0]) < 0) goto _err; @@ -703,6 +704,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa } // Add checksum + ASSERT(pCompCol->len > 0); pCompCol->len += sizeof(TSCKSUM); taosCalcChecksumAppend(0, (uint8_t *)tptr, pCompCol->len); @@ -792,7 +794,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err; } else { // Load - if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx)) < 0) goto _err; + if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err; ASSERT(pHelper->pDataCols[0]->numOfRows <= blockAtIdx(pHelper, blkIdx)->numOfRows); // Merge if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, rowsWritten) < 0) goto _err; @@ -848,7 +850,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err; } else { // Load-Merge-Write // Load - if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx)) < 0) goto _err; + if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err; if (blockAtIdx(pHelper, blkIdx)->last) pHelper->hasOldLastBlock = false; rowsWritten = rows3; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 6a9c8e1ff6..a4e0151f89 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -22,7 +22,7 @@ #include "exception.h" #include "../../../query/inc/qast.h" // todo move to common module -#include "../../../query/inc/tlosertree.h" // todo move to util module +#include "tlosertree.h" #include "tsdb.h" #include "tsdbMain.h" @@ -122,7 +122,7 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle); static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, 
STableCheckInfo* pCheckInfo, SCompBlock* pBlock, SArray* sa); static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey, +static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win, STsdbQueryHandle* pQueryHandle); static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) { @@ -249,8 +249,6 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh pCheckInfo->initBuf = true; int32_t order = pHandle->order; -// tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem); - // no data in buffer, abort if (pHandle->mem == NULL && pHandle->imem == NULL) { return false; @@ -392,8 +390,11 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { STable* pTable = pCheckInfo->pTableObj; assert(pTable != NULL); - - initTableMemIterator(pHandle, pCheckInfo); + + if (!pCheckInfo->initBuf) { + initTableMemIterator(pHandle, pCheckInfo); + } + SDataRow row = getSDataRowInTableMem(pCheckInfo); if (row == NULL) { return false; @@ -411,8 +412,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 
1:-1; STimeWindow* win = &pHandle->cur.win; - pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey, - pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API + pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey, pHandle->outputCapacity, win, pHandle); // update the last key value pCheckInfo->lastKey = win->ekey + step; @@ -576,6 +576,8 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { STsdbRepo *pRepo = pQueryHandle->pTsdb; + + // TODO refactor SCompData* data = calloc(1, sizeof(SCompData) + sizeof(SCompCol) * pBlock->numOfCols); data->numOfCols = pBlock->numOfCols; @@ -592,9 +594,12 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); } - tdInitDataCols(pCheckInfo->pDataCols, tsdbGetTableSchema(pCheckInfo->pTableObj)); + STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); + tdInitDataCols(pCheckInfo->pDataCols, pSchema); + tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema); + tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); - if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock) == 0) { + if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo) == 0) { SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; @@ -605,8 +610,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo } SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; - assert(pCols->numOfRows != 0); + assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); + pBlock->numOfRows = pCols->numOfRows; taosArrayDestroy(sa); tfree(data); @@ -636,7 +642,7 @@ static void 
handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1; cur->rows = tsdbReadRowsFromCache(pCheckInfo, binfo.window.skey - step, - pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle); + pQueryHandle->outputCapacity, &cur->win, pQueryHandle); pQueryHandle->realNumOfRows = cur->rows; // update the last key value @@ -1237,7 +1243,6 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void* // assert(pLeftBlockInfoEx->compBlock->offset != pRightBlockInfoEx->compBlock->offset); if (pLeftBlockInfoEx->compBlock->offset == pRightBlockInfoEx->compBlock->offset && pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) { - // todo add more information tsdbError("error in header file, two block with same offset:%" PRId64, (int64_t)pLeftBlockInfoEx->compBlock->offset); } @@ -1477,7 +1482,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); assert(numOfTables > 0); - + + SDataBlockInfo blockInfo = {{0}, 0}; if (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) { pQueryHandle->type = TSDB_QUERY_TYPE_ALL; pQueryHandle->order = TSDB_ORDER_DESC; @@ -1487,7 +1493,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { } SArray* sa = getDefaultLoadColumns(pQueryHandle, true); - /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle); + /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, sa); if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { @@ -1558,7 +1564,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { bool ret = tsdbNextDataBlock((void*) pSecQueryHandle); assert(ret); - /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle); + /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); 
/*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, sa); for (int32_t i = 0; i < numOfCols; ++i) { @@ -1693,11 +1699,11 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) { pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL}; } -static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey, +static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win, STsdbQueryHandle* pQueryHandle) { int numOfRows = 0; int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns); - *skey = TSKEY_INITIAL_VAL; + win->skey = TSKEY_INITIAL_VAL; int64_t st = taosGetTimestampUs(); STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb); @@ -1717,11 +1723,11 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int break; } - if (*skey == INT64_MIN) { - *skey = key; + if (win->skey == INT64_MIN) { + win->skey = key; } - *ekey = key; + win->ekey = key; copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, pMeta, numOfCols, pTable); if (++numOfRows >= maxRowsToRead) { @@ -1750,7 +1756,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } -SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) { +void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle, SDataBlockInfo* pDataBlockInfo) { STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle; SQueryFilePos* cur = &pHandle->cur; STable* pTable = NULL; @@ -1763,16 +1769,12 @@ SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) { STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex); pTable = pCheckInfo->pTableObj; } - - SDataBlockInfo blockInfo = { - .uid = pTable->tableId.uid, - .tid = pTable->tableId.tid, - .rows = cur->rows, - .window = cur->win, - .numOfCols = QH_GET_NUM_OF_COLS(pHandle), - }; - return 
blockInfo; + pDataBlockInfo->uid = pTable->tableId.uid; + pDataBlockInfo->tid = pTable->tableId.tid; + pDataBlockInfo->rows = cur->rows; + pDataBlockInfo->window = cur->win; + pDataBlockInfo->numOfCols = QH_GET_NUM_OF_COLS(pHandle); } /* @@ -1974,9 +1976,9 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { int32_t type = 0; int32_t bytes = 0; - if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor extract method , to queryExecutor to generate tags values - f1 = (char*) pTable1->name; - f2 = (char*) pTable2->name; + if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { + f1 = (char*) TABLE_NAME(pTable1); + f2 = (char*) TABLE_NAME(pTable2); type = TSDB_DATA_TYPE_BINARY; bytes = tGetTableNameColumnSchema().bytes; } else { @@ -2085,13 +2087,17 @@ bool indexedNodeFilterFp(const void* pNode, void* param) { char* val = NULL; if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) { - val = (char*) pTable->name; + val = (char*) TABLE_NAME(pTable); } else { val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); } - //todo :the val is possible to be null, so check it out carefully - int32_t ret = pInfo->compare(val, pInfo->q); + int32_t ret = 0; + if (val == NULL) { //the val is possible to be null, so check it out carefully + ret = -1; // val is missing in table tags value pairs + } else { + ret = pInfo->compare(val, pInfo->q); + } switch (pInfo->optr) { case TSDB_RELATION_EQUAL: { diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index b026ad4386..2982b8dc70 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -24,14 +24,13 @@ extern "C" { #include "tref.h" #include "hash.h" -typedef void (*__cache_freeres_fn_t)(void*); +typedef void (*__cache_free_fn_t)(void*); typedef struct SCacheStatis { int64_t missCount; int64_t hitCount; int64_t totalAccess; int64_t refreshCount; - int32_t numOfCollision; } SCacheStatis; typedef struct SCacheDataNode { @@ -70,7 +69,7 @@ typedef struct { // void * pTimer; SCacheStatis statistics; 
SHashObj * pHashTable; - __cache_freeres_fn_t freeFp; + __cache_free_fn_t freeFp; uint32_t numOfElemsInTrash; // number of element in trash uint8_t deleting; // set the deleting flag to stop refreshing ASAP. pthread_t refreshWorker; @@ -91,15 +90,7 @@ typedef struct { * @param fn free resource callback function * @return */ -SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName); - -/** - * initialize the cache object and set the free object callback function - * @param refreshTimeInSeconds - * @param freeCb - * @return - */ -SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName); +SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char *cacheName); /** * add data into cache @@ -163,9 +154,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove); /** * move all data node into trash, clear node in trash can if it is not referenced by any clients * @param handle - * @param _remove remove the data or not if refcount is greater than 0 */ -void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove); +void taosCacheEmpty(SCacheObj *pCacheObj); /** * release all allocated memory and destroy the cache object. 
@@ -180,6 +170,14 @@ void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove); */ void taosCacheCleanup(SCacheObj *pCacheObj); +/** + * + * @param pCacheObj + * @param fp + * @return + */ +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp); + #ifdef __cplusplus } #endif diff --git a/src/query/inc/tlosertree.h b/src/util/inc/tlosertree.h similarity index 100% rename from src/query/inc/tlosertree.h rename to src/util/inc/tlosertree.h diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h index 686e5ab313..4ba620dce0 100644 --- a/src/util/inc/tskiplist.h +++ b/src/util/inc/tskiplist.h @@ -51,6 +51,7 @@ typedef struct SSkipListNode { #define SL_GET_NODE_KEY(s, n) ((s)->keyFn(SL_GET_NODE_DATA(n))) #define SL_GET_SL_MIN_KEY(s) (SL_GET_NODE_KEY((s), SL_GET_FORWARD_POINTER((s)->pHead, 0))) +#define SL_GET_SL_MAX_KEY(s) (SL_GET_NODE_KEY((s), SL_GET_BACKWARD_POINTER((s)->pTail, 0))) #define SL_GET_NODE_LEVEL(n) *(uint8_t *)((n)) @@ -119,7 +120,6 @@ typedef struct SSkipList { pthread_rwlock_t *lock; SSkipListNode * pHead; // point to the first element SSkipListNode * pTail; // point to the last element - void * lastKey; // last key in the skiplist #if SKIP_LIST_RECORD_PERFORMANCE tSkipListState state; // skiplist state #endif diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index d546970868..688e49a40b 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -223,9 +223,9 @@ static void doCleanupDataCache(SCacheObj *pCacheObj); * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime * @param handle Cache object handle */ -static void* taosCacheRefresh(void *handle); +static void* taosCacheTimedRefresh(void *pCacheObj); -SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) { +SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t 
fn, const char* cacheName) { if (refreshTimeInSeconds <= 0) { return NULL; } @@ -261,7 +261,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheRefresh, pCacheObj); + pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheTimedRefresh, pCacheObj); pthread_attr_destroy(&thattr); return pCacheObj; @@ -450,7 +450,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } } -void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) { +void taosCacheEmpty(SCacheObj *pCacheObj) { SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); __cache_wr_lock(pCacheObj); @@ -459,8 +459,8 @@ void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) { break; } - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); - if (T_REF_VAL_GET(pNode) == 0 || _remove) { + SCacheDataNode *pNode = *(SCacheDataNode **) taosHashIterGet(pIter); + if (T_REF_VAL_GET(pNode) == 0) { taosCacheReleaseNode(pCacheObj, pNode); } else { taosCacheMoveToTrash(pCacheObj, pNode); @@ -469,7 +469,7 @@ void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) { __cache_unlock(pCacheObj); taosHashDestroyIter(pIter); - taosTrashCanEmpty(pCacheObj, _remove); + taosTrashCanEmpty(pCacheObj, false); } void taosCacheCleanup(SCacheObj *pCacheObj) { @@ -623,8 +623,29 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { free(pCacheObj); } -void* taosCacheRefresh(void *handle) { - SCacheObj *pCacheObj = (SCacheObj *)handle; +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { + SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); + + __cache_wr_lock(pCacheObj); + while (taosHashIterNext(pIter)) { + SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); + if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= time 
&& T_REF_VAL_GET(pNode) <= 0) { + taosCacheReleaseNode(pCacheObj, pNode); + continue; + } + + if (fp) { + fp(pNode->data); + } + } + + __cache_unlock(pCacheObj); + + taosHashDestroyIter(pIter); +} + +void* taosCacheTimedRefresh(void *handle) { + SCacheObj* pCacheObj = handle; if (pCacheObj == NULL) { uDebug("object is destroyed. no refresh retry"); return NULL; @@ -657,21 +678,8 @@ void* taosCacheRefresh(void *handle) { // refresh data in hash table if (elemInHash > 0) { - int64_t expiredTime = taosGetTimestampMs(); - - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - - __cache_wr_lock(pCacheObj); - while (taosHashIterNext(pIter)) { - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); - if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= expiredTime && T_REF_VAL_GET(pNode) <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } - } - - __cache_unlock(pCacheObj); - - taosHashDestroyIter(pIter); + int64_t now = taosGetTimestampMs(); + doCacheRefresh(pCacheObj, now, NULL); } taosTrashCanEmpty(pCacheObj, false); @@ -679,3 +687,12 @@ void* taosCacheRefresh(void *handle) { return NULL; } + +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { + if (pCacheObj == NULL) { + return; + } + + int64_t now = taosGetTimestampMs(); + doCacheRefresh(pCacheObj, now, fp); +} diff --git a/src/query/src/tlosertree.c b/src/util/src/tlosertree.c similarity index 97% rename from src/query/src/tlosertree.c rename to src/util/src/tlosertree.c index 5d471bb927..fa7e4fc340 100644 --- a/src/query/src/tlosertree.c +++ b/src/util/src/tlosertree.c @@ -13,10 +13,10 @@ * along with this program. If not, see . 
*/ +#include "tlosertree.h" #include "os.h" #include "taosmsg.h" -#include "tlosertree.h" -#include "queryLog.h" +#include "tulog.h" // set initial value for loser tree void tLoserTreeInit(SLoserTreeInfo* pTree) { @@ -45,7 +45,7 @@ uint32_t tLoserTreeCreate(SLoserTreeInfo** pTree, int32_t numOfEntries, void* pa *pTree = (SLoserTreeInfo*)calloc(1, sizeof(SLoserTreeInfo) + sizeof(SLoserTreeNode) * totalEntries); if ((*pTree) == NULL) { - qError("allocate memory for loser-tree failed. reason:%s", strerror(errno)); + uError("allocate memory for loser-tree failed. reason:%s", strerror(errno)); return TSDB_CODE_QRY_OUT_OF_MEMORY; } diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index f4f7904968..d9abf0d7c3 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -93,16 +93,18 @@ void taosCloseQueue(taos_queue param) { void *taosAllocateQitem(int size) { STaosQnode *pNode = (STaosQnode *)calloc(sizeof(STaosQnode) + size, 1); + if (pNode == NULL) return NULL; + uTrace("item:%p, node:%p is allocated", pNode->item, pNode); return (void *)pNode->item; } void taosFreeQitem(void *param) { if (param == NULL) return; - uTrace("item:%p is freed", param); char *temp = (char *)param; temp -= sizeof(STaosQnode); + uTrace("item:%p, node:%p is freed", param, temp); free(temp); } diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index f3c0babe6b..aacc4a5487 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -5,6 +5,7 @@ * it under the terms of the GNU Affero General Public License, version 3 * or later ("AGPL"), as published by the Free Software Foundation. * + * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. 
@@ -238,7 +239,7 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, SSkipListNode *pNode) { // if the new key is greater than the maximum key of skip list, push back this node at the end of skip list char *newDatakey = SL_GET_NODE_KEY(pSkipList, pNode); - if (pSkipList->size == 0 || pSkipList->comparFn(pSkipList->lastKey, newDatakey) < 0) { + if (pSkipList->size == 0 || pSkipList->comparFn(SL_GET_SL_MAX_KEY(pSkipList), newDatakey) < 0) { return tSkipListPushBack(pSkipList, pNode); } @@ -498,7 +499,7 @@ void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode) { if (pSkipList->lock) { pthread_rwlock_wrlock(pSkipList->lock); } - + for (int32_t j = level - 1; j >= 0; --j) { SSkipListNode* prev = SL_GET_BACKWARD_POINTER(pNode, j); SSkipListNode* next = SL_GET_FORWARD_POINTER(pNode, j); @@ -699,8 +700,6 @@ SSkipListNode* tSkipListPushBack(SSkipList *pSkipList, SSkipListNode *pNode) { SL_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pNode; } - pSkipList->lastKey = SL_GET_NODE_KEY(pSkipList, pNode); - atomic_add_fetch_32(&pSkipList->size, 1); if (pSkipList->lock) { pthread_rwlock_unlock(pSkipList->lock); diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index bd2606d5fb..b225dfa36a 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -383,10 +383,15 @@ int taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { return -1; } - if (taosKeepTcpAlive(sockFd) < 0) return -1; + if (taosKeepTcpAlive(sockFd) < 0) { + uError("failed to set tcp server keep-alive option, 0x%x:%hu(%s)", ip, port, strerror(errno)); + close(sockFd); + return -1; + } if (listen(sockFd, 10) < 0) { uError("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno)); + close(sockFd); return -1; } diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 0ee4ef4837..18c9ebf2e1 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -161,11 +161,17 @@ int32_t vnodeDrop(int32_t vgId) { int32_t vnodeAlter(void *param, 
SMDCreateVnodeMsg *pVnodeCfg) { SVnodeObj *pVnode = param; - if (pVnode->status != TAOS_VN_STATUS_READY) - return TSDB_CODE_VND_INVALID_STATUS; + // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS + // cfgVersion can be corrected by status msg + if (pVnode->status != TAOS_VN_STATUS_READY) { + vDebug("vgId:%d, vnode is not ready, do alter operation later", pVnode->vgId); + return TSDB_CODE_SUCCESS; + } - if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) - return TSDB_CODE_VND_NOT_SYNCED; + // the vnode may always fail to synchronize because of it in low cfgVersion + // so cannot use the following codes + // if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) + // return TSDB_CODE_VND_NOT_SYNCED; pVnode->status = TAOS_VN_STATUS_UPDATING; @@ -415,13 +421,19 @@ void *vnodeGetWal(void *pVnode) { } static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) { + int64_t totalStorage = 0; + int64_t compStorage = 0; + int64_t pointsWritten = 0; + if (pVnode->status != TAOS_VN_STATUS_READY) return; if (pStatus->openVnodes >= TSDB_MAX_VNODES) return; - if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) return; - if (pVnode->tsdb == NULL) return; - int64_t totalStorage, compStorage, pointsWritten = 0; - tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); + // still need report status when unsynced + if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) { + } else if (pVnode->tsdb == NULL) { + } else { + tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); + } SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++]; pLoad->vgId = htonl(pVnode->vgId); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index cbce3f6cf2..f054ae3904 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -61,7 +61,7 @@ int32_t vnodeProcessRead(void *param, 
SReadMsg *pReadMsg) { } static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { - void * pCont = pReadMsg->pCont; + void *pCont = pReadMsg->pCont; int32_t contLen = pReadMsg->contLen; SRspRet *pRet = &pReadMsg->rspRet; @@ -74,19 +74,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { killQueryMsg->free = htons(killQueryMsg->free); killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle); - void* handle = NULL; - if ((void**) killQueryMsg->qhandle != NULL) { - handle = *(void**) killQueryMsg->qhandle; - } - - vWarn("QInfo:%p connection %p broken, kill query", handle, pReadMsg->rpcMsg.handle); + vWarn("QInfo:%p connection %p broken, kill query", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1); - void** qhandle = qAcquireQInfo(pVnode->qMgmt, (void**) killQueryMsg->qhandle); + void** qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) killQueryMsg->qhandle); if (qhandle == NULL || *qhandle == NULL) { vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); } else { - assert(qhandle == (void**) killQueryMsg->qhandle); + assert(*qhandle == (void*) killQueryMsg->qhandle); qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true); } @@ -94,10 +89,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } int32_t code = TSDB_CODE_SUCCESS; - qinfo_t pQInfo = NULL; void** handle = NULL; if (contLen != 0) { + qinfo_t pQInfo = NULL; code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo); SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp)); @@ -110,48 +105,49 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { - handle = qRegisterQInfo(pVnode->qMgmt, pQInfo); + handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); 
if (handle == NULL) { // failed to register qhandle pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; qKillQuery(pQInfo); qKillQuery(pQInfo); - pQInfo = NULL; } else { assert(*handle == pQInfo); - pRsp->qhandle = htobe64((uint64_t) (handle)); + pRsp->qhandle = htobe64((uint64_t) pQInfo); } - if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { - vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle); + pQInfo = NULL; + if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; // NOTE: there two refcount, needs to kill twice // query has not been put into qhandle pool, kill it directly. - qKillQuery(pQInfo); + qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); return pRsp->code; } } else { assert(pQInfo == NULL); } - + if (handle != NULL) { + dnodePutItemIntoReadQueue(pVnode, *handle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + } vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo); } else { assert(pCont != NULL); - pQInfo = *(void**)(pCont); - handle = pCont; - code = TSDB_CODE_VND_ACTION_IN_PROGRESS; - - vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, pQInfo); - } - - if (pQInfo != NULL) { - qTableQuery(pQInfo); // do execute query - assert(handle != NULL); + handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); + if (handle == NULL) { + vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); + code = TSDB_CODE_QRY_INVALID_QHANDLE; + } else { + vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, (void*) pCont); + code = TSDB_CODE_VND_ACTION_IN_PROGRESS; + 
qTableQuery(*handle); // do execute query + } qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } - return code; } @@ -160,57 +156,64 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { SRspRet *pRet = &pReadMsg->rspRet; SRetrieveTableMsg *pRetrieve = pCont; - void **pQInfo = (void*) htobe64(pRetrieve->qhandle); + pRetrieve->qhandle = htobe64(pRetrieve->qhandle); pRetrieve->free = htons(pRetrieve->free); - vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *pQInfo); + vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *(void**) pRetrieve->qhandle); memset(pRet, 0, sizeof(SRspRet)); - int32_t ret = 0; - void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo); - if (handle == NULL || handle != pQInfo) { - ret = TSDB_CODE_QRY_INVALID_QHANDLE; + int32_t code = TSDB_CODE_SUCCESS; + void** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qhandle); + if (handle == NULL || (*handle) != (void*) pRetrieve->qhandle) { + code = TSDB_CODE_QRY_INVALID_QHANDLE; + vDebug("vgId:%d, invalid qhandle in fetch result, QInfo:%p", pVnode->vgId, (void*) pRetrieve->qhandle); + + pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + pRet->len = sizeof(SRetrieveTableRsp); + + memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp* pRsp = pRet->rsp; + pRsp->numOfRows = 0; + pRsp->useconds = 0; + pRsp->completed = true; + + return code; } if (pRetrieve->free == 1) { - if (ret == TSDB_CODE_SUCCESS) { - vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); + vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); - pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); - pRet->len = sizeof(SRetrieveTableRsp); + pRet->rsp = (SRetrieveTableRsp 
*)rpcMallocCont(sizeof(SRetrieveTableRsp)); + pRet->len = sizeof(SRetrieveTableRsp); - memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); - SRetrieveTableRsp* pRsp = pRet->rsp; - pRsp->numOfRows = 0; - pRsp->completed = true; - pRsp->useconds = 0; - } else { // todo handle error - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); - } - return ret; + memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp* pRsp = pRet->rsp; + pRsp->numOfRows = 0; + pRsp->completed = true; + pRsp->useconds = 0; + + return code; } - int32_t code = qRetrieveQueryResultInfo(*pQInfo); - if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) { - //TODO + bool freeHandle = true; + code = qRetrieveQueryResultInfo(*handle); + if (code != TSDB_CODE_SUCCESS) { + //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); - - } else { - // todo check code and handle error in build result set - code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len); - - if (qHasMoreResultsToRetrieve(*handle)) { - dnodePutItemIntoReadQueue(pVnode, handle); - pRet->qhandle = handle; - code = TSDB_CODE_SUCCESS; - } else { // no further execution invoked, release the ref to vnode - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); + } else { // if failed to dump result, free qhandle immediately + if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len)) == TSDB_CODE_SUCCESS) { + if (qHasMoreResultsToRetrieve(*handle)) { + dnodePutItemIntoReadQueue(pVnode, *handle); + pRet->qhandle = *handle; + freeHandle = false; + } } } + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); return code; } diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index dad079c4b8..5ed5e747f2 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -49,19 +49,26 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void 
*param2, void *item) { SVnodeObj *pVnode = (SVnodeObj *)param1; SWalHead *pHead = param2; - if (vnodeProcessWriteMsgFp[pHead->msgType] == NULL) + if (vnodeProcessWriteMsgFp[pHead->msgType] == NULL) { + vDebug("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[pHead->msgType]); return TSDB_CODE_VND_MSG_NOT_PROCESSED; + } if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { + vDebug("vgId:%d, msgType:%s not processed, no write auth", pVnode->vgId, taosMsg[pHead->msgType]); return TSDB_CODE_VND_NO_WRITE_AUTH; } if (pHead->version == 0) { // from client or CQ - if (pVnode->status != TAOS_VN_STATUS_READY) + if (pVnode->status != TAOS_VN_STATUS_READY) { + vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[pHead->msgType], pVnode->status); return TSDB_CODE_VND_INVALID_STATUS; // it may be in deleting or closing state + } - if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) + if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) { + vDebug("vgId:%d, msgType:%s not processed, replica:%d role:%d", pVnode->vgId, taosMsg[pHead->msgType], pVnode->syncCfg.replica, pVnode->role); return TSDB_CODE_RPC_NOT_READY; + } // assign version pVnode->version++; diff --git a/tests/pytest/alter/alter_table_crash.py b/tests/pytest/alter/alter_table_crash.py index aefe9ff26e..d1af022e35 100644 --- a/tests/pytest/alter/alter_table_crash.py +++ b/tests/pytest/alter/alter_table_crash.py @@ -78,5 +78,6 @@ class TDTestCase: tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/client/client.py b/tests/pytest/client/client.py index 21df7e6a86..6ac12c6795 100644 --- a/tests/pytest/client/client.py +++ b/tests/pytest/client/client.py @@ -40,6 +40,11 @@ class TDTestCase: ret = tdSql.query('select server_status() as result') tdSql.checkData(0, 0, 1) + ret = tdSql.query('show dnodes') 
+ + ret = tdSql.execute('alter dnode "%s" debugFlag 135' % tdSql.getData(0,1)) + tdLog.info('alter dnode "%s" debugFlag 135 -> ret: %d' % (tdSql.getData(0, 1), ret)) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py index cc41fd5e7d..53957fa96a 100755 --- a/tests/pytest/crash_gen.py +++ b/tests/pytest/crash_gen.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3.7 +#-----!/usr/bin/python3.7 ################################################################### # Copyright (c) 2016 by TAOS Technologies, Inc. # All rights reserved. @@ -15,6 +15,8 @@ from __future__ import annotations # For type hinting before definition, ref: h import sys import os +import io +import signal import traceback # Require Python 3 if sys.version_info[0] < 3: @@ -23,6 +25,7 @@ if sys.version_info[0] < 3: import getopt import argparse import copy +import requests import threading import random @@ -30,10 +33,14 @@ import time import logging import datetime import textwrap +import requests +from requests.auth import HTTPBasicAuth from typing import List from typing import Dict from typing import Set +from typing import IO +from queue import Queue, Empty from util.log import * from util.dnodes import * @@ -76,7 +83,10 @@ class WorkerThread: # Let us have a DB connection of our own if ( gConfig.per_thread_db_connection ): # type: ignore - self._dbConn = DbConn() + # print("connector_type = {}".format(gConfig.connector_type)) + self._dbConn = DbConn.createNative() if (gConfig.connector_type == 'native') else DbConn.createRest() + + self._dbInUse = False # if "use db" was executed already def logDebug(self, msg): logger.debug(" TRD[{}] {}".format(self._tid, msg)) @@ -84,7 +94,14 @@ class WorkerThread: def logInfo(self, msg): logger.info(" TRD[{}] {}".format(self._tid, msg)) - + def dbInUse(self): + return self._dbInUse + + def useDb(self): + if ( not self._dbInUse ): + self.execSql("use db") + self._dbInUse = 
True + def getTaskExecutor(self): return self._tc.getTaskExecutor() @@ -97,6 +114,7 @@ class WorkerThread: logger.info("Starting to run thread: {}".format(self._tid)) if ( gConfig.per_thread_db_connection ): # type: ignore + logger.debug("Worker thread openning database connection") self._dbConn.open() self._doTaskLoop() @@ -118,12 +136,17 @@ class WorkerThread: logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") break + # Fetch a task from the Thread Coordinator logger.debug("[TRD] Worker thread [{}] about to fetch task".format(self._tid)) task = tc.fetchTask() + + # Execute such a task logger.debug("[TRD] Worker thread [{}] about to execute task: {}".format(self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) logger.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) + + self._dbInUse = False # there may be changes between steps def verifyThreadSelf(self): # ensure we are called by this own thread if ( threading.get_ident() != self._thread.ident ): @@ -163,6 +186,18 @@ class WorkerThread: else: return self._tc.getDbManager().getDbConn().execute(sql) + def querySql(self, sql): # TODO: expose DbConn directly + if ( gConfig.per_thread_db_connection ): + return self._dbConn.query(sql) + else: + return self._tc.getDbManager().getDbConn().query(sql) + + def getQueryResult(self): + if ( gConfig.per_thread_db_connection ): + return self._dbConn.getQueryResult() + else: + return self._tc.getDbManager().getDbConn().getQueryResult() + def getDbConn(self): if ( gConfig.per_thread_db_connection ): return self._dbConn @@ -175,8 +210,9 @@ class WorkerThread: # else: # return self._tc.getDbState().getDbConn().query(sql) +# The coordinator of all worker threads, mostly running in main thread class ThreadCoordinator: - def __init__(self, pool, dbManager): + def __init__(self, pool: ThreadPool, dbManager): self._curStep = -1 # first step is 0 self._pool = pool # self._wd = wd @@ -187,6 
+223,7 @@ class ThreadCoordinator: self._stepBarrier = threading.Barrier(self._pool.numThreads + 1) # one barrier for all threads self._execStats = ExecutionStats() + self._runStatus = MainExec.STATUS_RUNNING def getTaskExecutor(self): return self._te @@ -197,6 +234,10 @@ class ThreadCoordinator: def crossStepBarrier(self): self._stepBarrier.wait() + def requestToStop(self): + self._runStatus = MainExec.STATUS_STOPPING + self._execStats.registerFailure("User Interruption") + def run(self): self._pool.createAndStartThreads(self) @@ -204,48 +245,73 @@ class ThreadCoordinator: self._curStep = -1 # not started yet maxSteps = gConfig.max_steps # type: ignore self._execStats.startExec() # start the stop watch - failed = False - while(self._curStep < maxSteps-1 and not failed): # maxStep==10, last curStep should be 9 + transitionFailed = False + hasAbortedTask = False + while(self._curStep < maxSteps-1 and + (not transitionFailed) and + (self._runStatus==MainExec.STATUS_RUNNING) and + (not hasAbortedTask)): # maxStep==10, last curStep should be 9 + if not gConfig.debug: print(".", end="", flush=True) # print this only if we are not in debug mode logger.debug("[TRD] Main thread going to sleep") - # Now ready to enter a step + # Now main thread (that's us) is ready to enter a step self.crossStepBarrier() # let other threads go past the pool barrier, but wait at the thread gate self._stepBarrier.reset() # Other worker threads should now be at the "gate" # At this point, all threads should be pass the overall "barrier" and before the per-thread "gate" - try: - self._dbManager.getStateMachine().transition(self._executedTasks) # at end of step, transiton the DB state - except taos.error.ProgrammingError as err: - if ( err.msg == 'network unavailable' ): # broken DB connection - logger.info("DB connection broken, execution failed") - traceback.print_stack() - failed = True - self._te = None # Not running any more - self._execStats.registerFailure("Broken DB Connection") - # 
continue # don't do that, need to tap all threads at end, and maybe signal them to stop - else: - raise - finally: - pass + # We use this period to do house keeping work, when all worker threads are QUIET. + hasAbortedTask = False + for task in self._executedTasks : + if task.isAborted() : + print("Task aborted: {}".format(task)) + hasAbortedTask = True + break + + if hasAbortedTask : # do transition only if tasks are error free + self._execStats.registerFailure("Aborted Task Encountered") + else: + try: + sm = self._dbManager.getStateMachine() + logger.debug("[STT] starting transitions") + sm.transition(self._executedTasks) # at end of step, transiton the DB state + logger.debug("[STT] transition ended") + # Due to limitation (or maybe not) of the Python library, we cannot share connections across threads + if sm.hasDatabase() : + for t in self._pool.threadList: + logger.debug("[DB] use db for all worker threads") + t.useDb() + # t.execSql("use db") # main thread executing "use db" on behalf of every worker thread + except taos.error.ProgrammingError as err: + if ( err.msg == 'network unavailable' ): # broken DB connection + logger.info("DB connection broken, execution failed") + traceback.print_stack() + transitionFailed = True + self._te = None # Not running any more + self._execStats.registerFailure("Broken DB Connection") + # continue # don't do that, need to tap all threads at end, and maybe signal them to stop + else: + raise + # finally: + # pass self.resetExecutedTasks() # clear the tasks after we are done # Get ready for next step logger.debug("<-- Step {} finished".format(self._curStep)) self._curStep += 1 # we are about to get into next step. TODO: race condition here! 
- logger.debug("\r\n--> Step {} starts with main thread waking up".format(self._curStep)) # Now not all threads had time to go to sleep + logger.debug("\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # Now not all threads had time to go to sleep # A new TE for the new step - if not failed: # only if not failed + if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) logger.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format(self._curStep)) # Now not all threads had time to go to sleep - self.tapAllThreads() + self.tapAllThreads() # Worker threads will wake up at this point, and each execute it's own task logger.debug("Main thread ready to finish up...") - if not failed: # only in regular situations + if not transitionFailed: # only in regular situations self.crossStepBarrier() # Cross it one last time, after all threads finish self._stepBarrier.reset() logger.debug("Main thread in exclusive zone...") @@ -255,11 +321,17 @@ class ThreadCoordinator: logger.debug("Main thread joining all threads") self._pool.joinAll() # Get all threads to finish - logger.info("All worker thread finished") + logger.info("\nAll worker threads finished") self._execStats.endExec() - def logStats(self): - self._execStats.logStats() + def printStats(self): + self._execStats.printStats() + + def isFailed(self): + return self._execStats.isFailed() + + def getExecStats(self): + return self._execStats def tapAllThreads(self): # in a deterministic manner wakeSeq = [] @@ -268,7 +340,7 @@ class ThreadCoordinator: wakeSeq.append(i) else: wakeSeq.insert(0, i) - logger.debug("[TRD] Main thread waking up worker thread: {}".format(str(wakeSeq))) + logger.debug("[TRD] Main thread waking up worker threads: {}".format(str(wakeSeq))) # TODO: set dice seed to a deterministic value for i in wakeSeq: self._pool.threadList[i].tapStepGate() # TODO: maybe a bit too deep?! 
@@ -306,7 +378,7 @@ class ThreadPool: self.maxSteps = maxSteps # Internal class variables self.curStep = 0 - self.threadList = [] + self.threadList = [] # type: List[WorkerThread] # starting to run all the threads, in locking steps def createAndStartThreads(self, tc: ThreadCoordinator): @@ -397,26 +469,39 @@ class LinearQueue(): return ret class DbConn: + TYPE_NATIVE = "native-c" + TYPE_REST = "rest-api" + TYPE_INVALID = "invalid" + + @classmethod + def create(cls, connType): + if connType == cls.TYPE_NATIVE: + return DbConnNative() + elif connType == cls.TYPE_REST: + return DbConnRest() + else: + raise RuntimeError("Unexpected connection type: {}".format(connType)) + + @classmethod + def createNative(cls): + return cls.create(cls.TYPE_NATIVE) + + @classmethod + def createRest(cls): + return cls.create(cls.TYPE_REST) + def __init__(self): - self._conn = None - self._cursor = None self.isOpen = False - - def open(self): # Open connection + self._type = self.TYPE_INVALID + + def open(self): if ( self.isOpen ): raise RuntimeError("Cannot re-open an existing DB connection") - cfgPath = "../../build/test/cfg" - self._conn = taos.connect(host="127.0.0.1", config=cfgPath) # TODO: make configurable - self._cursor = self._conn.cursor() + # below implemented by child classes + self.openByType() - # Get the connection/cursor ready - self._cursor.execute('reset query cache') - # self._cursor.execute('use db') # note we do this in _findCurrenState - - # Open connection - self._tdSql = TDSql() - self._tdSql.init(self._cursor) + logger.debug("[DB] data connection opened, type = {}".format(self._type)) self.isOpen = True def resetDb(self): # reset the whole database, etc. @@ -424,17 +509,128 @@ class DbConn: raise RuntimeError("Cannot reset database until connection is open") # self._tdSql.prepare() # Recreate database, etc. 
- self._cursor.execute('drop database if exists db') + self.execute('drop database if exists db') logger.debug("Resetting DB, dropped database") # self._cursor.execute('create database db') # self._cursor.execute('use db') - # tdSql.execute('show databases') + def queryScalar(self, sql) -> int : + return self._queryAny(sql) + + def queryString(self, sql) -> str : + return self._queryAny(sql) + + def _queryAny(self, sql) : # actual query result as an int + if ( not self.isOpen ): + raise RuntimeError("Cannot query database until connection is open") + nRows = self.query(sql) + if nRows != 1 : + raise RuntimeError("Unexpected result for query: {}, rows = {}".format(sql, nRows)) + if self.getResultRows() != 1 or self.getResultCols() != 1: + raise RuntimeError("Unexpected result set for query: {}".format(sql)) + return self.getQueryResult()[0][0] + + def execute(self, sql): + raise RuntimeError("Unexpected execution, should be overriden") + def openByType(self): + raise RuntimeError("Unexpected execution, should be overriden") + def getQueryResult(self): + raise RuntimeError("Unexpected execution, should be overriden") + def getResultRows(self): + raise RuntimeError("Unexpected execution, should be overriden") + def getResultCols(self): + raise RuntimeError("Unexpected execution, should be overriden") + +# Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql +class DbConnRest(DbConn): + def __init__(self): + super().__init__() + self._type = self.TYPE_REST + self._url = "http://localhost:6020/rest/sql" # fixed for now + self._result = None + + def openByType(self): # Open connection + pass # do nothing, always open + + def close(self): + if ( not self.isOpen ): + raise RuntimeError("Cannot clean up database until connection is open") + # Do nothing for REST + logger.debug("[DB] REST Database connection closed") + self.isOpen = False + + def _doSql(self, sql): + r = requests.post(self._url, + data = sql, + auth = HTTPBasicAuth('root', 'taosdata')) + 
rj = r.json() + # Sanity check for the "Json Result" + if (not 'status' in rj): + raise RuntimeError("No status in REST response") + + if rj['status'] == 'error': # clearly reported error + if (not 'code' in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in the future + # print("Raising programming error with REST return: {}".format(rj)) + raise taos.error.ProgrammingError(rj['desc'], errno) # todo: check existance of 'desc' + + if rj['status'] != 'succ': # better be this + raise RuntimeError("Unexpected REST return status: {}".format(rj['status'])) + + nRows = rj['rows'] if ('rows' in rj) else 0 + self._result = rj + return nRows + + def execute(self, sql): + if ( not self.isOpen ): + raise RuntimeError("Cannot execute database commands until connection is open") + logger.debug("[SQL-REST] Executing SQL: {}".format(sql)) + nRows = self._doSql(sql) + logger.debug("[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + return nRows + + def query(self, sql) : # return rows affected + return self.execute(sql) + + def getQueryResult(self): + return self._result['data'] + + def getResultRows(self): + print(self._result) + raise RuntimeError("TBD") + # return self._tdSql.queryRows + + def getResultCols(self): + print(self._result) + raise RuntimeError("TBD") + +class DbConnNative(DbConn): + def __init__(self): + super().__init__() + self._type = self.TYPE_REST + self._conn = None + self._cursor = None + + def openByType(self): # Open connection + cfgPath = "../../build/test/cfg" + self._conn = taos.connect(host="127.0.0.1", config=cfgPath) # TODO: make configurable + self._cursor = self._conn.cursor() + + # Get the connection/cursor ready + self._cursor.execute('reset query cache') + # self._cursor.execute('use db') # do this at the beginning of every step + + # Open connection + self._tdSql = TDSql() + self._tdSql.init(self._cursor) + def close(self): if ( not self.isOpen 
): raise RuntimeError("Cannot clean up database until connection is open") self._tdSql.close() + logger.debug("[DB] Database connection closed") self.isOpen = False def execute(self, sql): @@ -450,29 +646,19 @@ class DbConn: raise RuntimeError("Cannot query database until connection is open") logger.debug("[SQL] Executing SQL: {}".format(sql)) nRows = self._tdSql.query(sql) - logger.debug("[SQL] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + logger.debug("[SQL] Query Result, nRows = {}, SQL = {}".format(nRows, sql)) return nRows # results are in: return self._tdSql.queryResult def getQueryResult(self): return self._tdSql.queryResult - def _queryAny(self, sql) : # actual query result as an int - if ( not self.isOpen ): - raise RuntimeError("Cannot query database until connection is open") - tSql = self._tdSql - nRows = tSql.query(sql) - if nRows != 1 : - raise RuntimeError("Unexpected result for query: {}, rows = {}".format(sql, nRows)) - if tSql.queryRows != 1 or tSql.queryCols != 1: - raise RuntimeError("Unexpected result set for query: {}".format(sql)) - return tSql.queryResult[0][0] + def getResultRows(self): + return self._tdSql.queryRows - def queryScalar(self, sql) -> int : - return self._queryAny(sql) + def getResultCols(self): + return self._tdSql.queryCols - def queryString(self, sql) -> str : - return self._queryAny(sql) class AnyState: STATE_INVALID = -1 @@ -620,10 +806,10 @@ class StateDbOnly(AnyState): # self.assertHasTask(tasks, DropDbTask) # implied by hasSuccess # self.assertAtMostOneSuccess(tasks, DropDbTask) # self._state = self.STATE_EMPTY - if ( self.hasSuccess(tasks, TaskCreateSuperTable) ): # did not drop db, create table success - # self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table - if ( not self.hasTask(tasks, TaskDropSuperTable) ): - self.assertAtMostOneSuccess(tasks, TaskCreateSuperTable) # at most 1 attempt is successful, if we don't drop anything + # if ( self.hasSuccess(tasks, TaskCreateSuperTable) 
): # did not drop db, create table success + # # self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table + # if ( not self.hasTask(tasks, TaskDropSuperTable) ): + # self.assertAtMostOneSuccess(tasks, TaskCreateSuperTable) # at most 1 attempt is successful, if we don't drop anything # self.assertNoTask(tasks, DropDbTask) # should have have tried # if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet # # can't say there's add-data attempts, since they may all fail @@ -648,7 +834,9 @@ class StateSuperTableOnly(AnyState): def verifyTasksToState(self, tasks, newState): if ( self.hasSuccess(tasks, TaskDropSuperTable) ): # we are able to drop the table - self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) + #self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) + self.hasSuccess(tasks, TaskCreateSuperTable) # we must have had recreted it + # self._state = self.STATE_DB_ONLY # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table, but added data # self.assertNoTask(tasks, DropFixedTableTask) # not true in massively parrallel cases @@ -680,18 +868,19 @@ class StateHasData(AnyState): self.assertNoTask(tasks, TaskDropDb) # we must have drop_db task self.hasSuccess(tasks, TaskDropSuperTable) # self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy - elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted - self.assertNoTask(tasks, TaskDropDb) - self.assertNoTask(tasks, TaskDropSuperTable) - self.assertNoTask(tasks, TaskAddData) + # elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted + # self.assertNoTask(tasks, TaskDropDb) + # self.assertNoTask(tasks, TaskDropSuperTable) + # self.assertNoTask(tasks, TaskAddData) # self.hasSuccess(tasks, DeleteDataTasks) else: # should be STATE_HAS_DATA - self.assertNoTask(tasks, TaskDropDb) + if (not self.hasTask(tasks, TaskCreateDb) ): # only if we didn't create one + self.assertNoTask(tasks, TaskDropDb) # we 
shouldn't have dropped it if (not self.hasTask(tasks, TaskCreateSuperTable)) : # if we didn't create the table self.assertNoTask(tasks, TaskDropSuperTable) # we should not have a task that drops it # self.assertIfExistThenSuccess(tasks, ReadFixedDataTask) -class StateMechine : +class StateMechine: def __init__(self, dbConn): self._dbConn = dbConn self._curState = self._findCurrentState() # starting state @@ -700,8 +889,17 @@ class StateMechine : def getCurrentState(self): return self._curState + def hasDatabase(self): + return self._curState.canDropDb() # ha, can drop DB means it has one + # May be slow, use cautionsly... def getTaskTypes(self): # those that can run (directly/indirectly) from the current state + def typesToStrings(types): + ss = [] + for t in types: + ss.append(t.__name__) + return ss + allTaskClasses = StateTransitionTask.__subclasses__() # all state transition tasks firstTaskTypes = [] for tc in allTaskClasses: @@ -720,7 +918,7 @@ class StateMechine : if len(taskTypes) <= 0: raise RuntimeError("No suitable task types found for state: {}".format(self._curState)) - logger.debug("[OPS] Tasks found for state {}: {}".format(self._curState, taskTypes)) + logger.debug("[OPS] Tasks found for state {}: {}".format(self._curState, typesToStrings(taskTypes))) return taskTypes def _findCurrentState(self): @@ -730,7 +928,7 @@ class StateMechine : # logger.debug("Found EMPTY state") logger.debug("[STT] empty database found, between {} and {}".format(ts, time.time())) return StateEmpty() - dbc.execute("use db") # did not do this when openning connection + dbc.execute("use db") # did not do this when openning connection, and this is NOT the worker thread, which does this on their own if dbc.query("show tables") == 0 : # no tables # logger.debug("Found DB ONLY state") logger.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) @@ -746,6 +944,7 @@ class StateMechine : def transition(self, tasks): if ( len(tasks) == 0 ): # before 1st step, or 
otherwise empty + logger.debug("[STT] Starting State: {}".format(self._curState)) return # do nothing self._dbConn.execute("show dnodes") # this should show up in the server log, separating steps @@ -807,14 +1006,14 @@ class DbManager(): self._lock = threading.RLock() # self.openDbServerConnection() - self._dbConn = DbConn() + self._dbConn = DbConn.createNative() if (gConfig.connector_type=='native') else DbConn.createRest() try: self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected except taos.error.ProgrammingError as err: # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) if ( err.msg == 'client disconnected' ): # cannot open DB connection print("Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") - sys.exit() + sys.exit(2) else: raise except: @@ -829,7 +1028,7 @@ class DbManager(): def getDbConn(self): return self._dbConn - def getStateMachine(self): + def getStateMachine(self) -> StateMechine : return self._stateMachine # def getState(self): @@ -869,8 +1068,11 @@ class DbManager(): def getNextTick(self): with self._lock: # prevent duplicate tick - self._lastTick += datetime.timedelta(0, 1) # add one second to it - return self._lastTick + if Dice.throw(10) == 0 : # 1 in 10 chance + return self._lastTick + datetime.timedelta(0, -100) + else: # regular + self._lastTick += datetime.timedelta(0, 1) # add one second to it + return self._lastTick def getNextInt(self): with self._lock: @@ -894,15 +1096,60 @@ class DbManager(): self._dbConn.close() class TaskExecutor(): + class BoundedList: + def __init__(self, size = 10): + self._size = size + self._list = [] + + def add(self, n: int) : + if not self._list: # empty + self._list.append(n) + return + # now we should insert + nItems = len(self._list) + insPos = 0 + for i in range(nItems): + insPos = i + if n <= self._list[i] : # smaller than this item, time to insert + break # found the insertion point + insPos += 1 # insert 
to the right + + if insPos == 0 : # except for the 1st item, # TODO: elimiate first item as gating item + return # do nothing + + # print("Inserting at postion {}, value: {}".format(insPos, n)) + self._list.insert(insPos, n) # insert + + newLen = len(self._list) + if newLen <= self._size : + return # do nothing + elif newLen == (self._size + 1) : + del self._list[0] # remove the first item + else : + raise RuntimeError("Corrupt Bounded List") + + def __str__(self): + return repr(self._list) + + _boundedList = BoundedList() + def __init__(self, curStep): self._curStep = curStep + @classmethod + def getBoundedList(cls): + return cls._boundedList + def getCurStep(self): return self._curStep def execute(self, task: Task, wt: WorkerThread): # execute a task on a thread task.execute(wt) + def recordDataMark(self, n: int): + # print("[{}]".format(n), end="", flush=True) + self._boundedList.add(n) + # def logInfo(self, msg): # logger.info(" T[{}.x]: ".format(self._curStep) + msg) @@ -922,6 +1169,7 @@ class Task(): self._dbManager = dbManager self._workerThread = None self._err = None + self._aborted = False self._curStep = None self._numRows = None # Number of rows affected @@ -930,10 +1178,14 @@ class Task(): # logger.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats + self._lastSql = "" # last SQL executed/attempted def isSuccess(self): return self._err == None + def isAborted(self): + return self._aborted + def clone(self): # TODO: why do we need this again? newTask = self.__class__(self._dbManager, self._execStats) return newTask @@ -960,10 +1212,40 @@ class Task(): try: self._executeInternal(te, wt) # TODO: no return value? 
except taos.error.ProgrammingError as err: - self.logDebug("[=] Taos library exception: errno={:X}, msg: {}".format(err.errno, err)) - self._err = err - except: - self.logDebug("[=] Unexpected exception") + errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme + if ( errno2 in [ + 0x05, # TSDB_CODE_RPC_NOT_READY + 0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503, + 0x510, # vnode not in ready state + 0x600, + 1000 # REST catch-all error + ]) : # allowed errors + self.logDebug("[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql)) + print("_", end="", flush=True) + self._err = err + else: + errMsg = "[=] Unexpected Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql) + self.logDebug(errMsg) + if gConfig.debug : + # raise # so that we see full stack + traceback.print_exc() + print("\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + + "----------------------------\n") + # sys.exit(-1) + self._err = err + self._aborted = True + except Exception as e : + self.logInfo("Non-TAOS exception encountered") + self._err = e + self._aborted = True + traceback.print_exc() + except BaseException as e : + self.logInfo("Python base exception encountered") + self._err = e + self._aborted = True + traceback.print_exc() + except : + self.logDebug("[=] Unexpected exception, SQL: {}".format(self._lastSql)) raise self._execStats.endTaskType(self.__class__.__name__, self.isSuccess()) @@ -971,8 +1253,21 @@ class Task(): self._execStats.incExecCount(self.__class__.__name__, self.isSuccess()) # TODO: merge with above. 
def execSql(self, sql): + self._lastSql = sql return self._dbManager.execute(sql) + def execWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread + self._lastSql = sql + return wt.execSql(sql) + + def queryWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread + self._lastSql = sql + return wt.querySql(sql) + + def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread + return wt.getQueryResult() + + class ExecutionStats: def __init__(self): @@ -987,6 +1282,12 @@ class ExecutionStats: self._failed = False self._failureReason = None + def __str__(self): + return "[ExecStats: _failed={}, _failureReason={}".format(self._failed, self._failureReason) + + def isFailed(self): + return self._failed == True + def startExec(self): self._execStartTime = time.time() @@ -1018,7 +1319,7 @@ class ExecutionStats: self._failed = True self._failureReason = reason - def logStats(self): + def printStats(self): logger.info("----------------------------------------------------------------------") logger.info("| Crash_Gen test {}, with the following stats:". 
format("FAILED (reason: {})".format(self._failureReason) if self._failed else "SUCCEEDED")) @@ -1033,11 +1334,17 @@ class ExecutionStats: logger.info("| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format(self._accRunTime)) logger.info("| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime/execTimesAny)) logger.info("| Total Elapsed Time (from wall clock): {:.3f} seconds".format(self._elapsedTime)) + logger.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList())) logger.info("----------------------------------------------------------------------") class StateTransitionTask(Task): + LARGE_NUMBER_OF_TABLES = 35 + SMALL_NUMBER_OF_TABLES = 3 + LARGE_NUMBER_OF_RECORDS = 50 + SMALL_NUMBER_OF_RECORDS = 3 + @classmethod def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") @@ -1060,6 +1367,10 @@ class StateTransitionTask(Task): # return state.getValue() in cls.getBeginStates() raise RuntimeError("must be overriden") + @classmethod + def getRegTableName(cls, i): + return "db.reg_table_{}".format(i) + def execute(self, wt: WorkerThread): super().execute(wt) @@ -1073,7 +1384,7 @@ class TaskCreateDb(StateTransitionTask): return state.canCreateDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - wt.execSql("create database db") + self.execWtSql(wt, "create database db") class TaskDropDb(StateTransitionTask): @classmethod @@ -1085,7 +1396,7 @@ class TaskDropDb(StateTransitionTask): return state.canDropDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - wt.execSql("drop database db") + self.execWtSql(wt, "drop database db") logger.debug("[OPS] database dropped at {}".format(time.time())) class TaskCreateSuperTable(StateTransitionTask): @@ -1098,8 +1409,13 @@ class TaskCreateSuperTable(StateTransitionTask): return state.canCreateFixedSuperTable() def _executeInternal(self, te: TaskExecutor, wt: 
WorkerThread): - tblName = self._dbManager.getFixedSuperTableName() - wt.execSql("create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) + if not wt.dbInUse(): # no DB yet, to the best of our knowledge + logger.debug("Skipping task, no DB yet") + return + + tblName = self._dbManager.getFixedSuperTableName() + # wt.execSql("use db") # should always be in place + self.execWtSql(wt, "create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) # No need to create the regular tables, INSERT will do that automatically @@ -1114,16 +1430,16 @@ class TaskReadData(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): sTbName = self._dbManager.getFixedSuperTableName() - dbc = wt.getDbConn() - dbc.query("select TBNAME from db.{}".format(sTbName)) # TODO: analyze result set later + self.queryWtSql(wt, "select TBNAME from db.{}".format(sTbName)) # TODO: analyze result set later + if random.randrange(5) == 0 : # 1 in 5 chance, simulate a broken connection. 
TODO: break connection in all situations - dbc.close() - dbc.open() + wt.getDbConn().close() + wt.getDbConn().open() else: - rTables = dbc.getQueryResult() + rTables = self.getQueryResult(wt) # wt.getDbConn().getQueryResult() # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) for rTbName in rTables : # regular tables - dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure + self.execWtSql(wt, "select * from db.{}".format(rTbName[0])) # tdSql.query(" cars where tbname in ('carzero', 'carone')") @@ -1137,8 +1453,33 @@ class TaskDropSuperTable(StateTransitionTask): return state.canDropFixedSuperTable() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - tblName = self._dbManager.getFixedSuperTableName() - wt.execSql("drop table db.{}".format(tblName)) + # 1/2 chance, we'll drop the regular tables one by one, in a randomized sequence + if Dice.throw(2) == 0 : + tblSeq = list(range(2 + (self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES))) + random.shuffle(tblSeq) + tickOutput = False # if we have spitted out a "d" character for "drop regular table" + isSuccess = True + for i in tblSeq: + regTableName = self.getRegTableName(i); # "db.reg_table_{}".format(i) + try: + self.execWtSql(wt, "drop table {}".format(regTableName)) # nRows always 0, like MySQL + except taos.error.ProgrammingError as err: + errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correcting for strange error number scheme + if ( errno2 in [0x362]) : # mnode invalid table name + isSuccess = False + logger.debug("[DB] Acceptable error when dropping a table") + continue # try to delete next regular table + + if (not tickOutput): + tickOutput = True # Print only one time + if isSuccess : + print("d", end="", flush=True) + else: + print("f", end="", flush=True) + + # Drop the super table itself + tblName = self._dbManager.getFixedSuperTableName() + self.execWtSql(wt, "drop table 
db.{}".format(tblName)) class TaskAlterTags(StateTransitionTask): @classmethod @@ -1153,20 +1494,18 @@ class TaskAlterTags(StateTransitionTask): tblName = self._dbManager.getFixedSuperTableName() dice = Dice.throw(4) if dice == 0 : - wt.execSql("alter table db.{} add tag extraTag int".format(tblName)) + sql = "alter table db.{} add tag extraTag int".format(tblName) elif dice == 1 : - wt.execSql("alter table db.{} drop tag extraTag".format(tblName)) + sql = "alter table db.{} drop tag extraTag".format(tblName) elif dice == 2 : - wt.execSql("alter table db.{} drop tag newTag".format(tblName)) + sql = "alter table db.{} drop tag newTag".format(tblName) else: # dice == 3 - wt.execSql("alter table db.{} change tag extraTag newTag".format(tblName)) + sql = "alter table db.{} change tag extraTag newTag".format(tblName) + + self.execWtSql(wt, sql) class TaskAddData(StateTransitionTask): activeTable : Set[int] = set() # Track which table is being actively worked on - LARGE_NUMBER_OF_TABLES = 35 - SMALL_NUMBER_OF_TABLES = 3 - LARGE_NUMBER_OF_RECORDS = 50 - SMALL_NUMBER_OF_RECORDS = 3 # We use these two files to record operations to DB, useful for power-off tests fAddLogReady = None @@ -1192,7 +1531,7 @@ class TaskAddData(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): ds = self._dbManager - wt.execSql("use db") # TODO: seems to be an INSERT bug to require this + # wt.execSql("use db") # TODO: seems to be an INSERT bug to require this tblSeq = list(range(self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) random.shuffle(tblSeq) for i in tblSeq: @@ -1202,10 +1541,10 @@ class TaskAddData(StateTransitionTask): print("x", end="", flush=True) else: self.activeTable.add(i) # marking it active - # No need to shuffle data sequence, unless later we decide to do non-increment insertion + # No need to shuffle data sequence, unless later we decide to do non-increment insertion + regTableName = 
self.getRegTableName(i); # "db.reg_table_{}".format(i) for j in range(self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS) : # number of records per table - nextInt = ds.getNextInt() - regTableName = "db.reg_table_{}".format(i) + nextInt = ds.getNextInt() if gConfig.record_ops: self.prepToRecordOps() self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) @@ -1216,7 +1555,9 @@ class TaskAddData(StateTransitionTask): ds.getFixedSuperTableName(), ds.getNextBinary(), ds.getNextFloat(), ds.getNextTick(), nextInt) - wt.execSql(sql) + self.execWtSql(wt, sql) + # Successfully wrote the data into the DB, let's record it somehow + te.recordDataMark(nextInt) if gConfig.record_ops: self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) self.fAddLogDone.flush() @@ -1255,47 +1596,456 @@ class Dice(): raise RuntimeError("Cannot throw dice before seeding it") return random.randrange(start, stop) - -# Anyone needing to carry out work should simply come here -# class WorkDispatcher(): -# def __init__(self, dbState): -# # self.totalNumMethods = 2 -# self.tasks = [ -# # CreateTableTask(dbState), # Obsolete -# # DropTableTask(dbState), -# # AddDataTask(dbState), -# ] - -# def throwDice(self): -# max = len(self.tasks) - 1 -# dRes = random.randint(0, max) -# # logger.debug("Threw the dice in range [{},{}], and got: {}".format(0,max,dRes)) -# return dRes - -# def pickTask(self): -# dice = self.throwDice() -# return self.tasks[dice] - -# def doWork(self, workerThread): -# task = self.pickTask() -# task.execute(workerThread) - class LoggingFilter(logging.Filter): def filter(self, record: logging.LogRecord): if ( record.levelno >= logging.INFO ) : return True # info or above always log - msg = record.msg - # print("type = {}, value={}".format(type(msg), msg)) - # sys.exit() - # Commenting out below to adjust... 
# if msg.startswith("[TRD]"): # return False return True +class MyLoggingAdapter(logging.LoggerAdapter): + def process(self, msg, kwargs): + return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs + # return '[%s] %s' % (self.extra['connid'], msg), kwargs + +class SvcManager: + def __init__(self): + print("Starting TDengine Service Manager") + signal.signal(signal.SIGTERM, self.sigIntHandler) + signal.signal(signal.SIGINT, self.sigIntHandler) + signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! + self.inSigHandler = False + # self._status = MainExec.STATUS_RUNNING # set inside _startTaosService() + self.svcMgrThread = None + + def _doMenu(self): + choice = "" + while True: + print("\nInterrupting Service Program, Choose an Action: ") + print("1: Resume") + print("2: Terminate") + print("3: Restart") + # Remember to update the if range below + # print("Enter Choice: ", end="", flush=True) + while choice == "": + choice = input("Enter Choice: ") + if choice != "": + break # done with reading repeated input + if choice in ["1", "2", "3"]: + break # we are done with whole method + print("Invalid choice, please try again.") + choice = "" # reset + return choice + + def sigUsrHandler(self, signalNumber, frame) : + print("Interrupting main thread execution upon SIGUSR1") + if self.inSigHandler : # already + print("Ignoring repeated SIG...") + return # do nothing if it's already not running + self.inSigHandler = True + + choice = self._doMenu() + if choice == "1" : + self.sigHandlerResume() # TODO: can the sub-process be blocked due to us not reading from queue? 
+ elif choice == "2" : + self.stopTaosService() + elif choice == "3" : + self.stopTaosService() + self.startTaosService() + else: + raise RuntimeError("Invalid menu choice: {}".format(choice)) + + self.inSigHandler = False + + def sigIntHandler(self, signalNumber, frame): + print("Sig INT Handler starting...") + if self.inSigHandler : + print("Ignoring repeated SIG_INT...") + return + self.inSigHandler = True + + self.stopTaosService() + print("INT signal handler returning...") + self.inSigHandler = False + + def sigHandlerResume(self) : + print("Resuming TDengine service manager thread (main thread)...\n\n") + + def _checkServiceManagerThread(self): + if self.svcMgrThread: # valid svc mgr thread + if self.svcMgrThread.isStopped(): # done? + self.svcMgrThread.procIpcBatch() # one last time. TODO: appropriate? + self.svcMgrThread = None # no more + + def _procIpcAll(self): + while self.svcMgrThread : # for as long as the svc mgr thread is still here + self.svcMgrThread.procIpcBatch() # regular processing, + time.sleep(0.5) # pause, before next round + self._checkServiceManagerThread() + print("Service Manager Thread (with subprocess) has ended, main thread now exiting...") + + def startTaosService(self): + if self.svcMgrThread: + raise RuntimeError("Cannot start TAOS service when one may already be running") + self.svcMgrThread = ServiceManagerThread() # create the object + self.svcMgrThread.start() + print("TAOS service started, printing out output...") + self.svcMgrThread.procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines + print("TAOS service started") + + def stopTaosService(self, outputLines = 20): + print("Terminating Service Manager Thread (SMT) execution...") + if not self.svcMgrThread: + raise RuntimeError("Unexpected empty svc mgr thread") + self.svcMgrThread.stop() + if self.svcMgrThread.isStopped(): + self.svcMgrThread.procIpcBatch(outputLines) # one last time + self.svcMgrThread = None + print("----- End of TDengine Service Output 
-----\n") + print("SMT execution terminated") + else: + print("WARNING: SMT did not terminate as expected") + + def run(self): + self.startTaosService() + self._procIpcAll() # pump/process all the messages + if self.svcMgrThread: # if sig handler hasn't destroyed it by now + self.stopTaosService() # should have started already + +class ServiceManagerThread: + MAX_QUEUE_SIZE = 10000 + + def __init__(self): + self._tdeSubProcess = None + self._thread = None + self._status = None + + def getStatus(self): + return self._status + + def isRunning(self): + # return self._thread and self._thread.is_alive() + return self._status == MainExec.STATUS_RUNNING + + def isStopping(self): + return self._status == MainExec.STATUS_STOPPING + + def isStopped(self): + return self._status == MainExec.STATUS_STOPPED + + # Start the thread (with sub process), and wait for the sub service + # to become fully operational + def start(self): + if self._thread : + raise RuntimeError("Unexpected _thread") + if self._tdeSubProcess : + raise RuntimeError("TDengine sub process already created/running") + + self._status = MainExec.STATUS_STARTING + + self._tdeSubProcess = TdeSubProcess() + self._tdeSubProcess.start() + + self._ipcQueue = Queue() + self._thread = threading.Thread( + target=self.svcOutputReader, + args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) + self._thread.daemon = True # thread dies with the program + self._thread.start() + + # wait for service to start + for i in range(0, 10) : + time.sleep(1.0) + # self.procIpcBatch() # don't pump message during start up + print("_zz_", end="", flush=True) + if self._status == MainExec.STATUS_RUNNING : + logger.info("[] TDengine service READY to process requests") + return # now we've started + raise RuntimeError("TDengine service did not start successfully") # TODO: handle this better? 
+ + def stop(self): + # can be called from both main thread or signal handler + print("Terminating TDengine service running as the sub process...") + if self.isStopped(): + print("Service already stopped") + return + if self.isStopping(): + print("Service is already being stopped") + return + # Linux will send Control-C generated SIGINT to the TDengine process already, ref: https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes + if not self._tdeSubProcess : + raise RuntimeError("sub process object missing") + + self._status = MainExec.STATUS_STOPPING + self._tdeSubProcess.stop() + + if self._tdeSubProcess.isRunning(): # still running + print("FAILED to stop sub process, it is still running... pid = {}".format(self.subProcess.pid)) + else: + self._tdeSubProcess = None # not running any more + self.join() # stop the thread, change the status, etc. + + def join(self): + # TODO: sanity check + if not self.isStopping(): + raise RuntimeError("Unexpected status when ending svc mgr thread: {}".format(self._status)) + + if self._thread : + self._thread.join() + self._thread = None + self._status = MainExec.STATUS_STOPPED + else : + print("Joining empty thread, doing nothing") + + def _trimQueue(self, targetSize): + if targetSize <= 0: + return # do nothing + q = self._ipcQueue + if (q.qsize() <= targetSize ) : # no need to trim + return + + logger.debug("Triming IPC queue to target size: {}".format(targetSize)) + itemsToTrim = q.qsize() - targetSize + for i in range(0, itemsToTrim) : + try: + q.get_nowait() + except Empty: + break # break out of for loop, no more trimming + + TD_READY_MSG = "TDengine is initialized successfully" + def procIpcBatch(self, trimToTarget = 0, forceOutput = False): + self._trimQueue(trimToTarget) # trim if necessary + # Process all the output generated by the underlying sub process, managed by IO thread + print("<", end="", flush=True) + while True : + try: + line = self._ipcQueue.get_nowait() # getting 
output at fast speed + self._printProgress("_o") + except Empty: + # time.sleep(2.3) # wait only if there's no output + # no more output + print(".>", end="", flush=True) + return # we are done with THIS BATCH + else: # got line, printing out + if forceOutput: + logger.info(line) + else: + logger.debug(line) + print(">", end="", flush=True) + + _ProgressBars = ["--", "//", "||", "\\\\"] + def _printProgress(self, msg): # TODO: assuming 2 chars + print(msg, end="", flush=True) + pBar = self._ProgressBars[Dice.throw(4)] + print(pBar, end="", flush=True) + print('\b\b\b\b', end="", flush=True) + + def svcOutputReader(self, out: IO, queue): + # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python + # print("This is the svcOutput Reader...") + # for line in out : + for line in iter(out.readline, b''): + # print("Finished reading a line: {}".format(line)) + # print("Adding item to queue...") + line = line.decode("utf-8").rstrip() + queue.put(line) # This might block, and then causing "out" buffer to block + self._printProgress("_i") + + if self._status == MainExec.STATUS_STARTING : # we are starting, let's see if we have started + if line.find(self.TD_READY_MSG) != -1 : # found + self._status = MainExec.STATUS_RUNNING + + # Trim the queue if necessary: TODO: try this 1 out of 10 times + self._trimQueue(self.MAX_QUEUE_SIZE * 9 // 10) # trim to 90% size + + if self.isStopping() : # TODO: use thread status instead + print("_w", end="", flush=True) # WAITING for stopping sub process to finish its outptu + + # queue.put(line) + print("\nNo more output from IO thread managing TDengine service") # meaning sub process must have died + out.close() + +class TdeSubProcess: + def __init__(self): + self.subProcess = None + + def getStdOut(self): + return self.subProcess.stdout + + def isRunning(self): + return self.subProcess != None + + def start(self): + ON_POSIX = 'posix' in sys.builtin_module_names + svcCmd = 
['../../build/build/bin/taosd', '-c', '../../build/test/cfg'] + # svcCmd = ['vmstat', '1'] + if self.subProcess : # already there + raise RuntimeError("Corrupt process state") + + self.subProcess = subprocess.Popen( + svcCmd, + stdout=subprocess.PIPE, + # bufsize=1, # not supported in binary mode + close_fds=ON_POSIX) # had text=True, which interferred with reading EOF + + def stop(self): + if not self.subProcess: + print("Sub process already stopped") + return + + retCode = self.subProcess.poll() + if retCode : # valid return code, process ended + self.subProcess = None + else: # process still alive, let's interrupt it + print("Sub process is running, sending SIG_INT and waiting for it to terminate...") + self.subProcess.send_signal(signal.SIGINT) # sub process should end, then IPC queue should end, causing IO thread to end + try : + self.subProcess.wait(10) + except subprocess.TimeoutExpired as err: + print("Time out waiting for TDengine service process to exit") + else: + print("TDengine service process terminated successfully from SIG_INT") + self.subProcess = None + +class ClientManager: + def __init__(self): + print("Starting service manager") + signal.signal(signal.SIGTERM, self.sigIntHandler) + signal.signal(signal.SIGINT, self.sigIntHandler) + + self._status = MainExec.STATUS_RUNNING + self.tc = None + + def sigIntHandler(self, signalNumber, frame): + if self._status != MainExec.STATUS_RUNNING : + print("Ignoring repeated SIGINT...") + return # do nothing if it's already not running + self._status = MainExec.STATUS_STOPPING # immediately set our status + + print("Terminating program...") + self.tc.requestToStop() + + def _printLastNumbers(self): # to verify data durability + dbManager = DbManager(resetDb=False) + dbc = dbManager.getDbConn() + if dbc.query("show databases") == 0 : # no databae + return + if dbc.query("show tables") == 0 : # no tables + return + + dbc.execute("use db") + sTbName = dbManager.getFixedSuperTableName() + + # get all regular 
tables + dbc.query("select TBNAME from db.{}".format(sTbName)) # TODO: analyze result set later + rTables = dbc.getQueryResult() + + bList = TaskExecutor.BoundedList() + for rTbName in rTables : # regular tables + dbc.query("select speed from db.{}".format(rTbName[0])) + numbers = dbc.getQueryResult() + for row in numbers : + # print("<{}>".format(n), end="", flush=True) + bList.add(row[0]) + + print("Top numbers in DB right now: {}".format(bList)) + print("TDengine client execution is about to start in 2 seconds...") + time.sleep(2.0) + dbManager = None # release? + + def prepare(self): + self._printLastNumbers() + + def run(self): + if gConfig.auto_start_service : + svcMgr = SvcManager() + svcMgr.startTaosService() + + self._printLastNumbers() + + dbManager = DbManager() # Regular function + thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps) + self.tc = ThreadCoordinator(thPool, dbManager) + + self.tc.run() + # print("exec stats: {}".format(self.tc.getExecStats())) + # print("TC failed = {}".format(self.tc.isFailed())) + if gConfig.auto_start_service : + svcMgr.stopTaosService() + # Print exec status, etc., AFTER showing messages from the server + self.conclude() + # print("TC failed (2) = {}".format(self.tc.isFailed())) + return 1 if self.tc.isFailed() else 0 # Linux return code: ref https://shapeshed.com/unix-exit-codes/ + + def conclude(self): + self.tc.printStats() + self.tc.getDbManager().cleanUp() + + +class MainExec: + STATUS_STARTING = 1 + STATUS_RUNNING = 2 + STATUS_STOPPING = 3 + STATUS_STOPPED = 4 + + @classmethod + def runClient(cls): + clientManager = ClientManager() + return clientManager.run() + + @classmethod + def runService(cls): + svcManager = SvcManager() + svcManager.run() + + @classmethod + def runTemp(cls): # for debugging purposes + # # Hack to exercise reading from disk, imcreasing coverage. 
TODO: fix + # dbc = dbState.getDbConn() + # sTbName = dbState.getFixedSuperTableName() + # dbc.execute("create database if not exists db") + # if not dbState.getState().equals(StateEmpty()): + # dbc.execute("use db") + + # rTables = None + # try: # the super table may not exist + # sql = "select TBNAME from db.{}".format(sTbName) + # logger.info("Finding out tables in super table: {}".format(sql)) + # dbc.query(sql) # TODO: analyze result set later + # logger.info("Fetching result") + # rTables = dbc.getQueryResult() + # logger.info("Result: {}".format(rTables)) + # except taos.error.ProgrammingError as err: + # logger.info("Initial Super table OPS error: {}".format(err)) + + # # sys.exit() + # if ( not rTables == None): + # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) + # try: + # for rTbName in rTables : # regular tables + # ds = dbState + # logger.info("Inserting into table: {}".format(rTbName[0])) + # sql = "insert into db.{} values ('{}', {});".format( + # rTbName[0], + # ds.getNextTick(), ds.getNextInt()) + # dbc.execute(sql) + # for rTbName in rTables : # regular tables + # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure + # logger.info("Initial READING operation is successful") + # except taos.error.ProgrammingError as err: + # logger.info("Initial WRITE/READ error: {}".format(err)) + + # Sandbox testing code + # dbc = dbState.getDbConn() + # while True: + # rows = dbc.query("show databases") + # print("Rows: {}, time={}".format(rows, time.time())) + return def main(): # Super cool Python argument library: https://docs.python.org/3/library/argparse.html @@ -1308,93 +2058,52 @@ def main(): 2. 
You run the server there before this script: ./build/bin/taosd -c test/cfg ''')) + + parser.add_argument('-a', '--auto-start-service', action='store_true', + help='Automatically start/stop the TDengine service (default: false)') + parser.add_argument('-c', '--connector-type', action='store', default='native', type=str, + help='Connector type to use: native, rest, or mixed (default: 10)') parser.add_argument('-d', '--debug', action='store_true', help='Turn on DEBUG mode for more logging (default: false)') + parser.add_argument('-e', '--run-tdengine', action='store_true', + help='Run TDengine service in foreground (default: false)') parser.add_argument('-l', '--larger-data', action='store_true', help='Write larger amount of data during write operations (default: false)') parser.add_argument('-p', '--per-thread-db-connection', action='store_true', help='Use a single shared db connection (default: false)') parser.add_argument('-r', '--record-ops', action='store_true', help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - parser.add_argument('-s', '--max-steps', action='store', default=100, type=int, + parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int, help='Maximum number of steps to run (default: 100)') - parser.add_argument('-t', '--num-threads', action='store', default=10, type=int, + parser.add_argument('-t', '--num-threads', action='store', default=5, type=int, help='Number of threads to run (default: 10)') global gConfig gConfig = parser.parse_args() - if len(sys.argv) == 1: - parser.print_help() - sys.exit() - + + # Logging Stuff global logger - logger = logging.getLogger('CrashGen') - logger.addFilter(LoggingFilter()) + _logger = logging.getLogger('CrashGen') # real logger + _logger.addFilter(LoggingFilter()) + ch = logging.StreamHandler() + _logger.addHandler(ch) + + logger = MyLoggingAdapter(_logger, []) # Logging adapter, to be used as a logger + if ( 
gConfig.debug ): logger.setLevel(logging.DEBUG) # default seems to be INFO else: logger.setLevel(logging.INFO) - ch = logging.StreamHandler() - logger.addHandler(ch) - - # resetDb = False # DEBUG only - # dbState = DbState(resetDb) # DBEUG only! - dbManager = DbManager() # Regular function + Dice.seed(0) # initial seeding of dice - tc = ThreadCoordinator( - ThreadPool(gConfig.num_threads, gConfig.max_steps), - # WorkDispatcher(dbState), # Obsolete? - dbManager - ) - - # # Hack to exercise reading from disk, imcreasing coverage. TODO: fix - # dbc = dbState.getDbConn() - # sTbName = dbState.getFixedSuperTableName() - # dbc.execute("create database if not exists db") - # if not dbState.getState().equals(StateEmpty()): - # dbc.execute("use db") - - # rTables = None - # try: # the super table may not exist - # sql = "select TBNAME from db.{}".format(sTbName) - # logger.info("Finding out tables in super table: {}".format(sql)) - # dbc.query(sql) # TODO: analyze result set later - # logger.info("Fetching result") - # rTables = dbc.getQueryResult() - # logger.info("Result: {}".format(rTables)) - # except taos.error.ProgrammingError as err: - # logger.info("Initial Super table OPS error: {}".format(err)) - - # # sys.exit() - # if ( not rTables == None): - # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) - # try: - # for rTbName in rTables : # regular tables - # ds = dbState - # logger.info("Inserting into table: {}".format(rTbName[0])) - # sql = "insert into db.{} values ('{}', {});".format( - # rTbName[0], - # ds.getNextTick(), ds.getNextInt()) - # dbc.execute(sql) - # for rTbName in rTables : # regular tables - # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure - # logger.info("Initial READING operation is successful") - # except taos.error.ProgrammingError as err: - # logger.info("Initial WRITE/READ error: {}".format(err)) - - - - # Sandbox testing code - # dbc = dbState.getDbConn() - # while True: - # rows = 
dbc.query("show databases") - # print("Rows: {}, time={}".format(rows, time.time())) - - tc.run() - tc.logStats() - dbManager.cleanUp() - - # logger.info("Crash_Gen execution finished") + + # Run server or client + if gConfig.run_tdengine : # run server + MainExec.runService() + else : + return MainExec.runClient() if __name__ == "__main__": - main() + exitCode = main() + # print("Exiting with code: {}".format(exitCode)) + sys.exit(exitCode) diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index c845b39764..de80361aa3 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -38,4 +38,4 @@ export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib # Now we are all let, and let's see if we can find a crash. Note we pass all params -./crash_gen.py $@ +python3 ./crash_gen.py $@ diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 066dda5d97..83f94f727a 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -1,7 +1,6 @@ #!/bin/bash ulimit -c unlimited -python3 ./test.py -f client/client.py python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/int.py python3 ./test.py -f insert/float.py @@ -146,8 +145,14 @@ python3 ./test.py -f query/queryJoin.py python3 ./test.py -f query/select_last_crash.py #stream +python3 ./test.py -f stream/metric_1.py +python3 ./test.py -f stream/new.py python3 ./test.py -f stream/stream1.py python3 ./test.py -f stream/stream2.py +python3 ./test.py -f stream/parser.py #alter table python3 ./test.py -f alter/alter_table_crash.py + +# client +python3 ./test.py -f client/client.py diff --git a/tests/pytest/insert/writeDBNonStop.py b/tests/pytest/insert/writeDBNonStop.py index c89853ffb6..bdc93f0469 100644 --- a/tests/pytest/insert/writeDBNonStop.py +++ b/tests/pytest/insert/writeDBNonStop.py @@ -67,7 +67,7 @@ class DBWriteNonStop: self.cursor.execute( "select first(ts), last(ts), min(speed), 
max(speed), avg(speed), count(*) from st") data = self.cursor.fetchall() - end = datetime.now() + end = datetime.now() self.writeDataToCSVFile(data, (end - start).seconds) time.sleep(.001) @@ -75,8 +75,9 @@ class DBWriteNonStop: self.cursor.close() self.conn.close() + test = DBWriteNonStop() test.connectDB() test.createTable() test.insertData() -test.closeConn() \ No newline at end of file +test.closeConn() diff --git a/tests/pytest/query/filterAllIntTypes.py b/tests/pytest/query/filterAllIntTypes.py index a2bab63c88..4d91168dae 100644 --- a/tests/pytest/query/filterAllIntTypes.py +++ b/tests/pytest/query/filterAllIntTypes.py @@ -89,17 +89,25 @@ class TDTestCase: tdSql.checkRows(101) # range for int type on column - tdSql.query("select * from st%s where num > 50 and num < 100" % curType) - tdSql.checkRows(49) + tdSql.query( + "select * from st%s where num > 50 and num < 100" % + curType) + tdSql.checkRows(49) - tdSql.query("select * from st%s where num >= 50 and num < 100" % curType) - tdSql.checkRows(50) + tdSql.query( + "select * from st%s where num >= 50 and num < 100" % + curType) + tdSql.checkRows(50) - tdSql.query("select * from st%s where num > 50 and num <= 100" % curType) - tdSql.checkRows(50) + tdSql.query( + "select * from st%s where num > 50 and num <= 100" % + curType) + tdSql.checkRows(50) - tdSql.query("select * from st%s where num >= 50 and num <= 100" % curType) - tdSql.checkRows(51) + tdSql.query( + "select * from st%s where num >= 50 and num <= 100" % + curType) + tdSql.checkRows(51) # > for int type on tag tdSql.query("select * from st%s where id > 5" % curType) @@ -135,16 +143,22 @@ class TDTestCase: # range for int type on tag tdSql.query("select * from st%s where id > 5 and id < 7" % curType) - tdSql.checkRows(10) + tdSql.checkRows(10) - tdSql.query("select * from st%s where id >= 5 and id < 7" % curType) - tdSql.checkRows(20) + tdSql.query( + "select * from st%s where id >= 5 and id < 7" % + curType) + tdSql.checkRows(20) - 
tdSql.query("select * from st%s where id > 5 and id <= 7" % curType) - tdSql.checkRows(20) + tdSql.query( + "select * from st%s where id > 5 and id <= 7" % + curType) + tdSql.checkRows(20) - tdSql.query("select * from st%s where id >= 5 and id <= 7" % curType) - tdSql.checkRows(30) + tdSql.query( + "select * from st%s where id >= 5 and id <= 7" % + curType) + tdSql.checkRows(30) print( "======= Verify filter for %s type finished =========" % diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py index 6d135e1006..6ea240a334 100644 --- a/tests/pytest/query/queryJoin.py +++ b/tests/pytest/query/queryJoin.py @@ -77,7 +77,7 @@ class TDTestCase: # join queries tdSql.query( "select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") - tdSql.checkRows(6) + tdSql.checkRows(6) tdSql.error( "select ts, pressure, temperature, id, dscrption from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") @@ -108,7 +108,7 @@ class TDTestCase: tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") tdSql.checkRows(6) - + tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") tdSql.checkRows(6) diff --git a/tests/pytest/query/queryMetaData.py b/tests/pytest/query/queryMetaData.py index 7b95e4a81c..67df20cb9a 100755 --- a/tests/pytest/query/queryMetaData.py +++ b/tests/pytest/query/queryMetaData.py @@ -55,9 +55,9 @@ class MetadataQuery: def createTablesAndInsertData(self, threadID): cursor = self.connectDB() - cursor.execute("use test") + cursor.execute("use test") - tablesPerThread = int (self.tables / self.numOfTherads) + tablesPerThread = int(self.tables / self.numOfTherads) base = threadID * tablesPerThread for i in range(tablesPerThread): cursor.execute( @@ -68,24 +68,72 @@ class MetadataQuery: 
%d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' % - (base + i + 1, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100)) - + (base + i + 1, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 
100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100)) + cursor.execute( "insert into t%d values(%d, 1) (%d, 2) (%d, 3) (%d, 4) (%d, 5)" % (base + i + 1, self.ts + 1, self.ts + 2, self.ts + 3, self.ts + 4, self.ts + 5)) - cursor.close() + cursor.close() def queryData(self, query): cursor = self.connectDB() cursor.execute("use test") - print("================= query tag data =================") + print("================= query tag data =================") startTime = datetime.now() cursor.execute(query) cursor.fetchall() @@ -107,15 +155,15 @@ if __name__ == '__main__': print( "================= Create %d tables and insert %d records into each table =================" % (t.tables, t.records)) - startTime = datetime.now() + startTime = datetime.now() threads = [] for i in range(t.numOfTherads): thread = threading.Thread( target=t.createTablesAndInsertData, args=(i,)) thread.start() threads.append(thread) - - for th in threads: + + for th in threads: th.join() endTime = datetime.now() diff --git a/tests/pytest/query/queryMetaPerformace.py b/tests/pytest/query/queryMetaPerformace.py index 0570311b08..8ab7105c9d 100644 --- a/tests/pytest/query/queryMetaPerformace.py +++ b/tests/pytest/query/queryMetaPerformace.py @@ -19,6 +19,7 @@ import time from datetime import datetime import numpy as np + class MyThread(threading.Thread): def __init__(self, func, args=()): @@ -35,17 +36,23 @@ class MyThread(threading.Thread): except Exception: return None + class MetadataQuery: def initConnection(self): self.tables = 100 self.records = 10 - self.numOfTherads =5 + 
self.numOfTherads = 5 self.ts = 1537146000000 self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" self.config = "/etc/taos" - self.conn = taos.connect( self.host, self.user, self.password, self.config) + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + def connectDB(self): return self.conn.cursor() @@ -69,7 +76,7 @@ class MetadataQuery: cursor.execute("use test") base = threadID * self.tables - tablesPerThread = int (self.tables / self.numOfTherads) + tablesPerThread = int(self.tables / self.numOfTherads) for i in range(tablesPerThread): cursor.execute( '''create table t%d using meters tags( @@ -79,20 +86,69 @@ class MetadataQuery: %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' % - (base + i + 1, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100)) + (base + i + 1, (base + i) % + 100, (base + i) % + 10000, 
(base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100)) for j in range(self.records): cursor.execute( "insert into t%d values(%d, %d)" % (base + i + 1, self.ts + j, j)) cursor.close() - def queryWithTagId(self, threadId, tagId, queryNum): - print("---------thread%d start-----------"%threadId) + + def queryWithTagId(self, threadId, tagId, queryNum): + print("---------thread%d start-----------" % threadId) query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9, tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18, tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27, @@ -103,18 +159,19 @@ class MetadataQuery: latancy = [] cursor = self.connectDB() cursor.execute("use test") - for i in range(queryNum): + for i in range(queryNum): startTime = time.time() - cursor.execute(query.format(id = 
tagId, condition = i)) + cursor.execute(query.format(id=tagId, condition=i)) cursor.fetchall() - latancy.append((time.time() - startTime)) - print("---------thread%d end-----------"%threadId) + latancy.append((time.time() - startTime)) + print("---------thread%d end-----------" % threadId) return latancy + def queryData(self, query): cursor = self.connectDB() cursor.execute("use test") - print("================= query tag data =================") + print("================= query tag data =================") startTime = datetime.now() cursor.execute(query) cursor.fetchall() @@ -124,7 +181,7 @@ class MetadataQuery: (endTime - startTime).seconds) cursor.close() - #self.conn.close() + # self.conn.close() if __name__ == '__main__': @@ -132,18 +189,33 @@ if __name__ == '__main__': t = MetadataQuery() t.initConnection() - latancys = [] - threads = [] + latancys = [] + threads = [] tagId = 1 - queryNum = 1000 + queryNum = 1000 for i in range(t.numOfTherads): - thread = MyThread(t.queryWithTagId, args = (i, tagId, queryNum)) - threads.append(thread) + thread = MyThread(t.queryWithTagId, args=(i, tagId, queryNum)) + threads.append(thread) thread.start() - for i in range(t.numOfTherads): + for i in range(t.numOfTherads): threads[i].join() - latancys.extend(threads[i].get_result()) - print("Total query: %d"%(queryNum * t.numOfTherads)) - print("statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f" - %(sum(latancys)/(queryNum * t.numOfTherads), np.percentile(latancys, 50), np.percentile(latancys, 75), np.percentile(latancys, 95), np.percentile(latancys, 99))) - + latancys.extend(threads[i].get_result()) + print("Total query: %d" % (queryNum * t.numOfTherads)) + print( + "statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f" % + (sum(latancys) / + ( + queryNum * + t.numOfTherads), + np.percentile( + latancys, + 50), + np.percentile( + latancys, + 75), + np.percentile( + latancys, + 95), + np.percentile( + latancys, + 99))) diff --git 
a/tests/pytest/query/select_last_crash.py b/tests/pytest/query/select_last_crash.py index e49002716e..9b580a24ac 100644 --- a/tests/pytest/query/select_last_crash.py +++ b/tests/pytest/query/select_last_crash.py @@ -23,7 +23,6 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - self.rowNum = 5000 self.ts = 1537146000000 @@ -36,7 +35,9 @@ class TDTestCase: "create table t1 using st tags('dev_001')") for i in range(self.rowNum): - tdSql.execute("insert into t1 values(%d, 'taosdata%d', %d)" % (self.ts + i, i + 1, i + 1)) + tdSql.execute( + "insert into t1 values(%d, 'taosdata%d', %d)" % + (self.ts + i, i + 1, i + 1)) tdSql.query("select last(*) from st") tdSql.checkRows(1) diff --git a/tests/pytest/random-test/random-test-multi-threading-3.py b/tests/pytest/random-test/random-test-multi-threading-3.py index 7079a5c118..a8e2c26ae5 100644 --- a/tests/pytest/random-test/random-test-multi-threading-3.py +++ b/tests/pytest/random-test/random-test-multi-threading-3.py @@ -330,7 +330,6 @@ class Test (Thread): self.q.put(-1) tdLog.exit("second thread failed, first thread exit too") - elif (self.threadId == 2): while True: self.dbEvent.wait() diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index eada5f67f7..d3a8deaf47 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -1,7 +1,6 @@ #!/bin/bash ulimit -c unlimited -python3 ./test.py -f client/client.py python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/int.py python3 ./test.py -f insert/float.py @@ -138,6 +137,9 @@ python3 ./test.py -f query/filterOtherTypes.py python3 ./test.py -f query/queryError.py python3 ./test.py -f query/querySort.py python3 ./test.py -f query/queryJoin.py +python3 ./test.py -f query/filterCombo.py +python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/select_last_crash.py #stream python3 ./test.py -f stream/stream1.py @@ -146,4 +148,5 @@ python3 ./test.py -f 
stream/stream2.py #alter table python3 ./test.py -f alter/alter_table_crash.py - +# client +python3 ./test.py -f client/client.py diff --git a/tests/pytest/smoketest.sh b/tests/pytest/smoketest.sh index 6b21912dd5..7c14b673e5 100755 --- a/tests/pytest/smoketest.sh +++ b/tests/pytest/smoketest.sh @@ -1,10 +1,6 @@ #!/bin/bash ulimit -c unlimited -# client -python3 ./test.py $1 -f client/client.py -python3 ./test.py $1 -s && sleep 1 - # insert python3 ./test.py $1 -f insert/basic.py python3 ./test.py $1 -s && sleep 1 @@ -35,3 +31,7 @@ python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f query/filter.py python3 ./test.py $1 -s && sleep 1 +# client +python3 ./test.py $1 -f client/client.py +python3 ./test.py $1 -s && sleep 1 + diff --git a/tests/pytest/stream/metric_1.py b/tests/pytest/stream/metric_1.py new file mode 100644 index 0000000000..b4cccac69c --- /dev/null +++ b/tests/pytest/stream/metric_1.py @@ -0,0 +1,104 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def createFuncStream(self, expr, suffix, value): + tbname = "strm_" + suffix + tdLog.info("create stream table %s" % tbname) + tdSql.query("select %s from stb interval(1d)" % expr) + tdSql.checkData(0, 1, value) + tdSql.execute("create table %s as select %s from stb interval(1d)" % (tbname, expr)) + + def checkStreamData(self, suffix, value): + sql = "select * from strm_" + suffix + tdSql.waitedQuery(sql, 1, 120) + tdSql.checkData(0, 1, value) + + def run(self): + tbNum = 10 + rowNum = 20 + + tdSql.prepare() + + tdLog.info("===== preparing data =====") + tdSql.execute( + "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)") + for i in range(tbNum): + tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) + for j in range(rowNum): + tdSql.execute( + "insert into tb%d values (now - %dm, %d, %d)" % + (i, 1440 - j, j, j)) + time.sleep(0.1) + + self.createFuncStream("count(*)", "c1", 200) + self.createFuncStream("count(tbcol)", "c2", 200) + self.createFuncStream("count(tbcol2)", "c3", 200) + self.createFuncStream("avg(tbcol)", "av", 9.5) + self.createFuncStream("sum(tbcol)", "su", 1900) + self.createFuncStream("min(tbcol)", "mi", 0) + self.createFuncStream("max(tbcol)", "ma", 19) + self.createFuncStream("first(tbcol)", "fi", 0) + self.createFuncStream("last(tbcol)", "la", 19) + #tdSql.query("select stddev(tbcol) from stb interval(1d)") + #tdSql.query("select leastsquares(tbcol, 1, 1) from 
stb interval(1d)") + tdSql.query("select top(tbcol, 1) from stb interval(1d)") + tdSql.query("select bottom(tbcol, 1) from stb interval(1d)") + #tdSql.query("select percentile(tbcol, 1) from stb interval(1d)") + #tdSql.query("select diff(tbcol) from stb interval(1d)") + + tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d)") + tdSql.checkData(0, 1, 200) + #tdSql.execute("create table strm_wh as select count(tbcol) from stb where ts < now + 4m interval(1d)") + + self.createFuncStream("count(tbcol)", "as", 200) + + tdSql.query("select count(tbcol) from stb interval(1d) group by tgcol") + tdSql.checkData(0, 1, 20) + + tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d) group by tgcol") + tdSql.checkData(0, 1, 20) + + self.checkStreamData("c1", 200) + self.checkStreamData("c2", 200) + self.checkStreamData("c3", 200) + self.checkStreamData("av", 9.5) + self.checkStreamData("su", 1900) + self.checkStreamData("mi", 0) + self.checkStreamData("ma", 19) + self.checkStreamData("fi", 0) + self.checkStreamData("la", 19) + #self.checkStreamData("wh", 200) + self.checkStreamData("as", 200) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py new file mode 100644 index 0000000000..b8503f0b4e --- /dev/null +++ b/tests/pytest/stream/new.py @@ -0,0 +1,71 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + rowNum = 200 + totalNum = 200 + tdSql.prepare() + + tdLog.info("=============== step1") + tdSql.execute("create table mt(ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)") + for i in range(5): + tdSql.execute("create table tb%d using mt tags(%d)" % (i, i)) + for j in range(rowNum): + tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) + time.sleep(0.1) + + tdLog.info("=============== step2") + tdSql.query("select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") + tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") + + tdLog.info("=============== step3") + tdSql.waitedQuery("select * from st", 1, 120) + v = tdSql.getData(0, 3) + if v >= 51: + tdLog.exit("value is %d, which is larger than 51" % v) + + tdLog.info("=============== step4") + for i in range(5, 10): + tdSql.execute("create table tb%d using mt tags(%d)" % (i, i)) + for j in range(rowNum): + tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) + + tdLog.info("=============== step5") + tdLog.sleep(30) + tdSql.waitedQuery("select * from st order by ts desc", 1, 120) + v = tdSql.getData(0, 3) + if v <= 51: + tdLog.exit("value is %d, which is smaller than 51" % v) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, 
TDTestCase()) + + diff --git a/tests/pytest/stream/parser.py b/tests/pytest/stream/parser.py new file mode 100644 index 0000000000..3b231d2b39 --- /dev/null +++ b/tests/pytest/stream/parser.py @@ -0,0 +1,182 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + ''' + def bug2222(self): + tdSql.prepare() + tdSql.execute("create table superreal(ts timestamp, addr binary(5), val float) tags (deviceNo binary(20))") + tdSql.execute("create table real_001 using superreal tags('001')") + tdSql.execute("create table tj_001 as select sum(val) from real_001 interval(1m)") + + t = datetime.datetime.now() + for i in range(60): + ts = t.strftime("%Y-%m-%d %H:%M") + t += datetime.timedelta(minutes=1) + sql = "insert into real_001 values('%s:0%d', '1', %d)" % (ts, 0, i) + for j in range(4): + sql += ",('%s:0%d', '%d', %d)" % (ts, j + 1, j + 1, i) + tdSql.execute(sql) + time.sleep(60 + random.random() * 60 - 30) + ''' + + def tbase300(self): + tdLog.debug("begin tbase300") + + tdSql.prepare() + tdSql.execute("create table mt(ts timestamp, c1 int, c2 int) tags(t1 int)") + tdSql.execute("create table tb1 using mt tags(1)"); + tdSql.execute("create table tb2 using mt tags(2)"); + tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), 
min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s)") + #tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2), first(c1) from mt interval(4s) sliding(2s)") + tdLog.sleep(10) + tdSql.execute("insert into tb2 values(now, 1, 1)"); + tdSql.execute("insert into tb1 values(now, 1, 1)"); + tdLog.sleep(4) + tdSql.query("select * from mt") + tdSql.query("select * from strm") + tdSql.execute("drop table tb1") + + tdSql.waitedQuery("select * from strm", 1, 100) + if tdSql.queryRows < 1 or tdSql.queryRows > 2: + tdLog.exit("rows should be 1 or 2") + + tdSql.execute("drop table tb2") + tdSql.execute("drop table mt") + tdSql.execute("drop table strm") + + def tbase304(self): + tdLog.debug("begin tbase304") + # we cannot reset query cache in server side, as a workaround, + # set super table name to mt304, need to change back to mt later + tdSql.execute("create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int)") + tdSql.execute("create table tb1 using mt304 tags(1, 1)") + tdSql.execute("create table tb2 using mt304 tags(1, -1)") + time.sleep(0.1) + tdSql.execute("create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s)") + tdSql.execute("insert into tb1 values (now,1)") + tdSql.execute("insert into tb2 values (now,2)") + + tdSql.waitedQuery("select * from strm", 1, 100) + if tdSql.queryRows < 1 or tdSql.queryRows > 2: + tdLog.exit("rows should be 1 or 2") + + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1.000000000) + tdSql.execute("alter table mt304 drop tag t2") + tdSql.execute("insert into tb2 values (now,2)") + tdSql.execute("insert into tb1 values (now,1)") + tdSql.query("select * from strm") + tdSql.execute("alter table mt304 add tag t2 int") + tdLog.sleep(1) + tdSql.query("select * from strm") + + def wildcardFilterOnTags(self): + tdLog.debug("begin wildcardFilterOnTag") + tdSql.prepare() + tdSql.execute("create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10))") + 
tdSql.execute("create table tb1 using stb tags('a1')") + tdSql.execute("create table tb2 using stb tags('b2')") + tdSql.execute("create table tb3 using stb tags('a3')") + tdSql.execute("create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s)") + tdSql.query("describe strm") + tdSql.checkRows(4) + + tdLog.sleep(1) + tdSql.execute("insert into tb1 values (now, 0, 'tb1')") + tdLog.sleep(4) + tdSql.execute("insert into tb2 values (now, 2, 'tb2')") + tdLog.sleep(4) + tdSql.execute("insert into tb3 values (now, 0, 'tb3')") + + tdSql.waitedQuery("select * from strm", 4, 60) + tdSql.checkRows(4) + tdSql.checkData(0, 2, 0.000000000) + if tdSql.getData(0, 3) == 'tb2': + tdLog.exit("unexpected value of data03") + if tdSql.getData(1, 3) == 'tb2': + tdLog.exit("unexpected value of data13") + if tdSql.getData(2, 3) == 'tb2': + tdLog.exit("unexpected value of data23") + if tdSql.getData(3, 3) == 'tb2': + tdLog.exit("unexpected value of data33") + + tdLog.info("add table tb4 to see if stream still works correctly") + # The vnode client needs to refresh metadata cache to allow strm calculate tb4's data. 
+ # But the current refreshing frequency is every 10 min + # commented out the case below to save running time + tdSql.execute("create table tb4 using stb tags('a4')") + tdSql.execute("insert into tb4 values(now, 4, 'tb4')") + tdSql.waitedQuery("select * from strm order by ts desc", 6, 60) + tdSql.checkRows(6) + tdSql.checkData(0, 2, 4) + tdSql.checkData(0, 3, "tb4") + + tdLog.info("change tag values to see if stream still works correctly") + tdSql.execute("alter table tb4 set tag t1='b4'") + tdLog.sleep(3) + tdSql.execute("insert into tb1 values (now, 1, 'tb1_a1')") + tdLog.sleep(4) + tdSql.execute("insert into tb4 values (now, -4, 'tb4_b4')") + tdSql.waitedQuery("select * from strm order by ts desc", 8, 100) + tdSql.checkRows(8) + tdSql.checkData(0, 2, 1) + tdSql.checkData(0, 3, "tb1_a1") + + def datatypes(self): + tdLog.debug("begin data types") + tdSql.prepare() + tdSql.execute("create table stb3 (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15))") + tdSql.execute("create table tb0 using stb3 tags(0, 'tb0')") + tdSql.execute("create table tb1 using stb3 tags(1, 'tb1')") + tdSql.execute("create table tb2 using stb3 tags(2, 'tb2')") + tdSql.execute("create table tb3 using stb3 tags(3, 'tb3')") + tdSql.execute("create table tb4 using stb3 tags(4, 'tb4')") + + tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb3 where ts < now + 30s interval(4s) sliding(2s)") + #tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5) from stb where ts < now + 30s interval(4s) sliding(2s)") + tdLog.sleep(1) + tdSql.execute("insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) ") + + 
tdSql.waitedQuery("select * from strm0 order by ts desc", 2, 120) + tdSql.checkRows(2) + + tdSql.execute("insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true) ") + tdSql.waitedQuery("select * from strm0 order by ts desc", 4, 120) + tdSql.checkRows(4) + + def run(self): + self.tbase300() + self.tbase304() + self.wildcardFilterOnTags() + self.datatypes() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/stream1.py b/tests/pytest/stream/stream1.py index 86244d29e0..c657379441 100644 --- a/tests/pytest/stream/stream1.py +++ b/tests/pytest/stream/stream1.py @@ -55,10 +55,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 1) tdLog.info("===== step3 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - tdSql.query("select * from s0") - + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 2, rowNum) @@ -82,10 +79,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 1) tdLog.info("===== step7 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - - tdSql.query("select * from s0") + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 2, rowNum) @@ -108,10 +102,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 2) tdLog.info("===== step9 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, rowNum * tbNum) tdSql.checkData(0, 2, rowNum * tbNum) @@ -134,9 +125,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 2) tdLog.info("===== step13 =====") - 
tdLog.info("sleeping 120 seconds") - time.sleep(120) - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, rowNum * tbNum) tdSql.checkData(0, 2, rowNum * tbNum) diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py index f1932daf47..44882f5972 100644 --- a/tests/pytest/stream/stream2.py +++ b/tests/pytest/stream/stream2.py @@ -53,8 +53,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 1) tdLog.info("===== step3 =====") - time.sleep(120) - tdSql.query("select * from s0") + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) except Exception as e: @@ -81,8 +80,7 @@ class TDTestCase: tdLog.info(repr(e)) tdLog.info("===== step7 =====") - time.sleep(120) - tdSql.query("select * from s0") + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 2, rowNum) @@ -107,8 +105,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 2) tdLog.info("===== step9 =====") - time.sleep(120) - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, totalNum) tdSql.checkData(0, 2, totalNum) @@ -137,8 +134,7 @@ class TDTestCase: tdLog.info(repr(e)) tdLog.info("===== step13 =====") - time.sleep(120) - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, totalNum) #tdSql.checkData(0, 2, None) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 370af1ba13..aa865ff9b4 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -190,32 +190,31 @@ class TDDnode: "dnode:%d is deployed and configured by %s" % (self.index, self.cfgPath)) - def start(self): + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) - binPath = "" if ("community" in selfPath): - projPath = selfPath + "/../../../../" - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = 
os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - binPath = os.path.join(root, "taosd") - break + projPath = selfPath[:selfPath.find("community")] else: - projPath = selfPath + "/../../../" - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - binPath = os.path.join(root, "taosd") - break + projPath = selfPath[:selfPath.find("tests")] - if (binPath == ""): + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): tdLog.exit("taosd not found!") else: - tdLog.info("taosd found in %s" % rootRealPath) + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" if self.deployed == 0: tdLog.exit("dnode:%d is not deployed" % (self.index)) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index dc0366b214..96ea036adc 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -29,10 +29,8 @@ class TDSql: self.cursor = cursor if (log): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - self.cursor.log(callerFilename + ".sql") + caller = inspect.getframeinfo(inspect.stack()[1][0]) + self.cursor.log(caller.filename + ".sql") def close(self): self.cursor.close() @@ -55,12 +53,8 @@ class TDSql: except BaseException: expectErrNotOccured = False if expectErrNotOccured: - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - tdLog.exit( - "%s failed: sql:%s, expect error not occured" % - (callerFilename, sql)) + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit("%s(%d) failed: 
sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) else: self.queryRows = 0 self.queryCols = 0 @@ -69,75 +63,68 @@ class TDSql: def query(self, sql): self.sql = sql - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self.cursor.description) - # if self.queryRows == 1 and self.queryCols == 1: - # tdLog.info("sql:%s, rows:%d cols:%d data:%s" % (self.sql, self.queryRows, self.queryCols, self.queryResult[0][0])) - # else: - # tdLog.info("sql:%s, rows:%d cols:%d" % (self.sql, self.queryRows, self.queryCols)) + try: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.exit("%s(%d) failed: sql:%s, %s" % args) return self.queryRows + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) + self.sql = sql + try: + for i in range(timeout): + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + return (self.queryRows, timeout) + def checkRows(self, expectRows): - if self.queryRows != expectRows: - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - tdLog.exit( - "%s failed: sql:%s, queryRows:%d != expect:%d" % - (callerFilename, self.sql, self.queryRows, expectRows)) - tdLog.info("sql:%s, queryRows:%d == 
expect:%d" % - (self.sql, self.queryRows, expectRows)) + if self.queryRows == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows) + tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + + def checkRowCol(self, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, self.sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, self.sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > self.queryRows: + args = (caller.filename, caller.lineno, self.sql, row, self.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > self.queryCols: + args = (caller.filename, caller.lineno, self.sql, col, self.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) def checkDataType(self, row, col, dataType): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - - if row < 0: - tdLog.exit( - "%s failed: sql:%s, row:%d is smaller than zero" % - (callerFilename, self.sql, row)) - if col < 0: - tdLog.exit( - "%s failed: sql:%s, col:%d is smaller than zero" % - (callerFilename, self.sql, col)) - if row > self.queryRows: - tdLog.exit( - "%s failed: sql:%s, row:%d is larger than queryRows:%d" % - (callerFilename, self.sql, row, self.queryRows)) - if col > self.queryCols: - tdLog.exit( - "%s failed: sql:%s, col:%d is larger than queryCols:%d" % - (callerFilename, self.sql, col, self.queryCols)) - + self.checkRowCol(row, col) return self.cursor.istype(col, dataType) def checkData(self, row, col, data): - frame = inspect.stack()[1] - callerModule = 
inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - - if row < 0: - tdLog.exit( - "%s failed: sql:%s, row:%d is smaller than zero" % - (callerFilename, self.sql, row)) - if col < 0: - tdLog.exit( - "%s failed: sql:%s, col:%d is smaller than zero" % - (callerFilename, self.sql, col)) - if row > self.queryRows: - tdLog.exit( - "%s failed: sql:%s, row:%d is larger than queryRows:%d" % - (callerFilename, self.sql, row, self.queryRows)) - if col > self.queryCols: - tdLog.exit( - "%s failed: sql:%s, col:%d is larger than queryCols:%d" % - (callerFilename, self.sql, col, self.queryCols)) + self.checkRowCol(row, col) if self.queryResult[row][col] != data: - tdLog.exit("%s failed: sql:%s row:%d col:%d data:%s != expect:%s" % ( - callerFilename, self.sql, row, col, self.queryResult[row][col], data)) + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) if data is None: tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % @@ -153,26 +140,7 @@ class TDSql: (self.sql, row, col, self.queryResult[row][col], data)) def getData(self, row, col): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - - if row < 0: - tdLog.exit( - "%s failed: sql:%s, row:%d is smaller than zero" % - (callerFilename, self.sql, row)) - if col < 0: - tdLog.exit( - "%s failed: sql:%s, col:%d is smaller than zero" % - (callerFilename, self.sql, col)) - if row > self.queryRows: - tdLog.exit( - "%s failed: sql:%s, row:%d is larger than queryRows:%d" % - (callerFilename, self.sql, row, self.queryRows)) - if col > self.queryCols: - tdLog.exit( - "%s failed: sql:%s, col:%d is larger than queryCols:%d" % - (callerFilename, self.sql, col, self.queryCols)) + self.checkRowCol(row, col) return self.queryResult[row][col] def executeTimes(self, sql, times): @@ 
-185,20 +153,21 @@ class TDSql: def execute(self, sql): self.sql = sql - self.affectedRows = self.cursor.execute(sql) + try: + self.affectedRows = self.cursor.execute(sql) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.exit("%s(%d) failed: sql:%s, %s" % args) return self.affectedRows def checkAffectedRows(self, expectAffectedRows): if self.affectedRows != expectAffectedRows: - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows) + tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args) - tdLog.exit( - "%s failed: sql:%s, affectedRows:%d != expect:%d" % - (callerFilename, self.sql, self.affectedRows, expectAffectedRows)) - tdLog.info("sql:%s, affectedRows:%d == expect:%d" % - (self.sql, self.affectedRows, expectAffectedRows)) + tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows)) tdSql = TDSql() diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/general/alter/cached_schema_after_alter.sim index 2d049ec595..bf9b9eb6a3 100644 --- a/tests/script/general/alter/cached_schema_after_alter.sim +++ b/tests/script/general/alter/cached_schema_after_alter.sim @@ -68,7 +68,7 @@ endi if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != NULL then return -1 endi @@ -80,7 +80,7 @@ endi if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != NULL then return -1 endi diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index 4dfee222cc..488f807fbc 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -843,6 +843,14 @@ if $data81 != 4 then return -1 endi +# desc fill query 
+print desc fill query +sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10) order by ts desc; +if $rows != 12 then + return -1 +endi + + print =============== clear sql drop database $db sql show databases diff --git a/tests/script/general/parser/fill_us.sim b/tests/script/general/parser/fill_us.sim new file mode 100644 index 0000000000..190b565b42 --- /dev/null +++ b/tests/script/general/parser/fill_us.sim @@ -0,0 +1,1037 @@ +system sh/stop_dnodes.sh +system sh/ip.sh -i 1 -s up +system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1 +system sh/cfg.sh -n dnode1 -c commitLog -v 0 +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +$dbPrefix = m_fl_db +$tbPrefix = m_fl_tb +$mtPrefix = m_fl_mt +$tbNum = 10 +$rowNum = 5 +$totalNum = $tbNum * $rowNum +$ts0 = 1537146000000000 # 2018-09-17 09:00:00.000000 +$delta = 600000000 +print ========== fill_us.sim +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db precision 'us' +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 bool, c7 binary(10), c8 nchar(10)) tags(tgcol int) + +$i = 0 +$ts = $ts0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $xs = $x * $delta + $ts = $ts0 + $xs + sql insert into $tb values ( $ts , $x , $x , $x , $x , $x , true, 'BINARY', 'NCHAR' ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +# setup +$i = 0 +$tb = $tbPrefix . 
$i +$tsu = 4 * $delta +$tsu = $tsu + $ts0 + +## fill syntax test +# number of fill values exceeds number of selected columns +print select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $data11 != 6 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 6.00000 then + return -1 +endi +if $data14 != 6.000000000 then + return -1 +endi + +# number of fill values is smaller than number of selected columns +print sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) +sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) +if $data11 != 6 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 6.00000 then + return -1 +endi + +# unspecified filling method +sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6) + +## constant fill test +# count_with_fill +print constant_fill test +print count_with_constant_fill +print sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if 
$data71 != 6 then + return -1 +endi +if $data81 != 1 then + return -1 +endi + +# avg_with_fill +print avg_witt_constant_fill +sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data11 != 6.000000000 then + return -1 +endi +if $data21 != 1.000000000 then + return -1 +endi +if $data31 != 6.000000000 then + return -1 +endi +if $data41 != 2.000000000 then + return -1 +endi +if $data51 != 6.000000000 then + return -1 +endi +if $data61 != 3.000000000 then + return -1 +endi +if $data71 != 6.000000000 then + return -1 +endi +if $data81 != 4.000000000 then + return -1 +endi + +# max_with_fill +print max_with_fill +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# min_with_fill +print min_with_fill +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# first_with_fill +print first_with_fill +sql select first(c1), first(c2), first(c3), 
first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# check double type values +if $data04 != 0.000000000 then + return -1 +endi +print data14 = $data14 +if $data14 != 6.000000000 then + return -1 +endi +if $data24 != 1.000000000 then + return -1 +endi +if $data34 != 6.000000000 then + return -1 +endi +if $data44 != 2.000000000 then + return -1 +endi +if $data54 != 6.000000000 then + return -1 +endi +if $data64 != 3.000000000 then + return -1 +endi + +# check float type values +print $data03 $data13 +if $data03 != 0.00000 then + return -1 +endi +if $data13 != 6.00000 then + return -1 +endi +if $data23 != 1.00000 then + return -1 +endi +if $data33 != 6.00000 then + return -1 +endi +if $data43 != 2.00000 then + return -1 +endi +if $data53 != 6.00000 then + return -1 +endi +if $data63 != 3.00000 then + return -1 +endi +if $data73 != 6.00000 then + return -1 +endi +if $data83 != 4.00000 then + return -1 +endi + + +# last_with_fill +print last_with_fill +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if 
$data81 != 4 then + return -1 +endi + +# fill_negative_values +sql select sum(c1), avg(c2), max(c3), min(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1, -1, -1, -1, -1, -1, -1) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != -1 then + return -1 +endi + +# fill_char_values_to_arithmetic_fields +sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') + +# fill_multiple_columns +sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc) +sql select sum(c1), avg(c2), min(c3), max(c4) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99) +if $rows != 9 then + return -1 +endi +print data01 = $data01 +print data11 = $data11 +if $data01 != 0 then + return -1 +endi +if $data11 != 99 then + return -1 +endi + +sql select * from $tb +#print data08 = $data08 +if $data08 != NCHAR then + return -1 +endi +#return -1 + + +# fill_into_nonarithmetic_fieds +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) +#if $data11 != 20000000 then +if $data11 != 1 then + return -1 +endi + +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1) +sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1') +# fill quoted values into bool column will 
throw error unless the value is 'true' or 'false' Note:2018-10-24 +# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24 +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1') +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true') + + +# fill nonarithmetic values into arithmetic fields +sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc); +sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); + +sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 10 then + return -1 +endi + +sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1); +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 10 then + return -1 +endi + +sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10'); +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 10 then + return -1 +endi + + +## linear fill +# feature currently switched off 2018/09/29 +#sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) + +## previous fill +print fill(prev) +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 
+endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data51 != 1 then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if $data71 != 1 then + return -1 +endi +if $data81 != 1 then + return -1 +endi + +# avg_with_fill +sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data21 != 1.000000000 then + return -1 +endi +if $data31 != 1.000000000 then + return -1 +endi +if $data41 != 2.000000000 then + return -1 +endi +if $data51 != 2.000000000 then + return -1 +endi +if $data61 != 3.000000000 then + return -1 +endi +if $data71 != 3.000000000 then + return -1 +endi +if $data81 != 4.000000000 then + return -1 +endi + +# max_with_fill +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# min_with_fill +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# 
first_with_fill +sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# last_with_fill +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +## NULL fill +print fill(value, NULL) +# count_with_fill +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL) +print select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL) +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data51 != null then + return -1 
+endi +if $data61 != 1 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 1 then + return -1 +endi +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(none) +if $rows != 5 then + return -1 +endi + +# avg_with_fill +sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1.000000000 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2.000000000 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3.000000000 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4.000000000 then + return -1 +endi + +# max_with_fill +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# min_with_fill +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + 
return -1 +endi +if $data81 != 4 then + return -1 +endi + +# first_with_fill +sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# last_with_fill +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# desc fill query +print desc fill query +sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10) order by ts desc; +if $rows != 12 then + return -1 +endi + + +#print =============== clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + +######################### us ########################## +$start = 1537146000000000 # 2018-09-17 09:00:00.000000 +$delta = 600000000 + +sql create table us_st (ts timestamp, c1 int, c2 double) tags(tgcol int) +sql create table us_t1 using us_st tags( 1 ) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000001', 1 , 1) +sql insert into us_t1 values ('2018-09-17 09:00:00.000002', 2 , 2) +sql insert 
into us_t1 values ('2018-09-17 09:00:00.000003', 3 , 3) +sql insert into us_t1 values ('2018-09-17 09:00:00.000004', 4 , 4) +sql insert into us_t1 values ('2018-09-17 09:00:00.000005', 5 , 5) +sql insert into us_t1 values ('2018-09-17 09:00:00.000006', 6 , 6) +sql insert into us_t1 values ('2018-09-17 09:00:00.000007', 7 , 7) +sql insert into us_t1 values ('2018-09-17 09:00:00.000008', 8 , 8) +sql insert into us_t1 values ('2018-09-17 09:00:00.000009', 9 , 9) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000015', 15 , 15) +sql insert into us_t1 values ('2018-09-17 09:00:00.000016', 16 , 16) +sql insert into us_t1 values ('2018-09-17 09:00:00.000017', 17 , 17) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000021', 21 , 21) +sql insert into us_t1 values ('2018-09-17 09:00:00.000022', 22 , 22) +sql insert into us_t1 values ('2018-09-17 09:00:00.000023', 23 , 23) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000027', 27 , 27) +sql insert into us_t1 values ('2018-09-17 09:00:00.000028', 28 , 28) +sql insert into us_t1 values ('2018-09-17 09:00:00.000029', 29 , 29) + +print sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(value, 999, 999) +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(value, 999, 999) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 999.000000000 then + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != 999.000000000 then + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) 
fill(none) +if $rows != 6 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 16.000000000 then + return -1 +endi +if $data51 != 21.000000000 then + return -1 +endi + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(null) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != null then + print ===== $data41 + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != null then + print ===== $data61 + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + + + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(prev) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 9.000000000 then + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != 16.000000000 then + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(linear) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 12.500000000 then + return -1 +endi +if 
$data51 != 16.000000000 then + return -1 +endi +if $data61 != 18.500000000 then + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + +print ======== fill_us.sim run end...... ================ \ No newline at end of file diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index 5d785a2fc3..70edf3535b 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -422,4 +422,63 @@ if $data97 != @group_tb0@ then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file + +#=========================== group by multi tags ====================== +sql create table st (ts timestamp, c int) tags (t1 int, t2 int, t3 int, t4 int); +sql create table t1 using st tags(1, 1, 1, 1); +sql create table t2 using st tags(1, 2, 2, 2); +sql insert into t1 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; +sql insert into t1 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; +sql insert into t2 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; +sql insert into t2 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; +if $rows != 40 then + return -1 +endi + +if $data01 != 1.000000000 then + return -1 +endi +if $data02 != t1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t1 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 1 then + 
return -1 +endi + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; +if $rows != 2 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t2 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 2 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/import_commit1.sim b/tests/script/general/parser/import_commit1.sim index a929c1846b..197ae58453 100644 --- a/tests/script/general/parser/import_commit1.sim +++ b/tests/script/general/parser/import_commit1.sim @@ -40,7 +40,7 @@ while $x < $rowNum endw print ====== tables created -sleep 60000 +sleep 6000 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit2.sim b/tests/script/general/parser/import_commit2.sim index c93b3168f1..e400d0c3cb 100644 --- a/tests/script/general/parser/import_commit2.sim +++ b/tests/script/general/parser/import_commit2.sim @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 60000 +sleep 6000 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit3.sim b/tests/script/general/parser/import_commit3.sim index 99ece98278..7e7451e689 100644 --- a/tests/script/general/parser/import_commit3.sim +++ b/tests/script/general/parser/import_commit3.sim @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 60000 +sleep 6000 $ts = $ts + 1 sql insert into $tb values ( $ts , -1, -1, -1, -1, -1) @@ -47,7 +47,7 @@ $ts = $ts0 + $delta $ts = $ts + 1 sql import into $tb values ( $ts , -2, -2, -2, -2, -2) -sleep 60000 +sleep 6000 sql show databases diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index 1bce6f1950..07f2cd3f77 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -210,6 
+210,11 @@ if $data10 != @70-01-01 08:01:40.200@ then return -1 endi +print data06 = $data06 +print data07 = $data07 +print data08 = $data08 +print data00 = $data00 + if $data07 != 0 then return -1 endi @@ -255,14 +260,17 @@ endi print 3 #agg + where condition sql select count(join_tb1.c3), count(join_tb0.ts) from $tb1 , $tb2 where $ts1 = $ts2 and join_tb1.ts <= 100002 and join_tb0.c7 = true; - -$val = 2 -if $data00 != $val then - print expect 2, actaul: $data00 +if $rows != 1 then return -1 endi -if $data01 != $val then +print $data00 + +if $data00 != 2 then + return -1 +endi + +if $data01 != 2 then return -1 endi @@ -412,7 +420,7 @@ endi #======================limit offset=================================== # tag values not int -sql_error select count(*) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t2=join_mt1.t2; #!!!!! +sql_error select count(*) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t2=join_mt1.t2; # tag type not identical sql_error select count(*) from join_mt0, join_mt1 where join_mt1.t2 = join_mt0.t1 and join_mt1.ts=join_mt0.ts; @@ -447,4 +455,15 @@ sql insert into um2 using m2 tags(9) values(1000001, 10)(2000000, 20); sql_error select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts; +#empty table join test, add for no result join test +sql create database ux1; +sql use ux1; +sql create table m1(ts timestamp, k int) tags(a binary(12), b int); +sql create table tm0 using m1 tags('abc', 1); +sql create table m2(ts timestamp, k int) tags(a int, b binary(12)); +sql create table tm2 using m2 tags(2, 'abc'); +sql select count(*) from tm0, tm2 where tm0.ts=tm2.ts; +sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a +sql drop database ux1; + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/limit2_query.sim b/tests/script/general/parser/limit2_query.sim index 1dbd0f32bd..8294247a86 100644 --- 
a/tests/script/general/parser/limit2_query.sim +++ b/tests/script/general/parser/limit2_query.sim @@ -327,3 +327,7 @@ endi if $data98 != 9 then return -1 endi + +#add one more test case +sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089; + diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index 329787b69c..9f944a586b 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -636,6 +636,15 @@ if $data00 != $data01 then return -1 endi +sql select first(ts), ts from select_tags_tb1 +if $row != 1 then + return -1 +endi + +if $data01 != @70-01-01 08:01:50.001@ then + return -1 +endi + print ======= selectivity + tags + group by + tags + filter + interval ================ sql select first(c1), t2, t1, tbname from select_tags_mt0 where c1<=2 interval(1d) group by tbname; if $row != 3 then diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim new file mode 100644 index 0000000000..177c95651f --- /dev/null +++ b/tests/script/general/parser/sliding.sim @@ -0,0 +1,461 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c debugFlag -v 135 +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/exec.sh -n dnode1 -s start +sleep 1000 +sql connect + +$dbPrefix = sliding_db +$tbPrefix = sliding_tb +$mtPrefix = sliding_mt +$tbNum = 8 +$rowNum = 10000 +$totalNum = $tbNum * $rowNum + +print =============== sliding.sim
$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +$tstart = 946656000000 + +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . 
$i + +sql drop database if exists $db -x step1 +step1: +sql create database if not exists $db tables 4 keep 36500 +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + $tg2 = ' . abc + $tg2 = $tg2 . ' + sql create table $tb using $mt tags( $i , $tg2 ) + + $x = 0 + while $x < $rowNum + $ms = $x . m + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + + $binary = ' . binary + $binary = $binary . $c + $binary = $binary . ' + + $nchar = ' . nchar + $nchar = $nchar . $c + $nchar = $nchar . ' + + sql insert into $tb values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $tstart = $tstart + 30 + $x = $x + 1 + endw + + $i = $i + 1 + $tstart = 946656000000 +endw + +sleep 100 + +$i1 = 1 +$i2 = 0 + +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +$dbPrefix = sliding_db +$tbPrefix = sliding_tb +$mtPrefix = sliding_mt + +$tb1 = $tbPrefix . $i1 +$tb2 = $tbPrefix . $i2 +$ts1 = $tb1 . .ts +$ts2 = $tb2 . 
.ts + +print ===============================interval_sliding query +sql select count(*) from sliding_tb0 interval(30s) sliding(30s); +if $row != 10 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data10 != @00-01-01 00:00:30.000@ then + return -1 +endi + +if $data11 != 1000 then + return -1 +endi + +sql select stddev(c1) from sliding_tb0 interval(10a) sliding(10a) +if $row != 10000 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 0.000000000 then + return -1 +endi + +if $data90 != @00-01-01 00:00:00.270@ then + return -1 +endi + +if $data91 != 0.000000000 then + return -1 +endi + +sql select stddev(c1),count(c2),first(c3),last(c4) from sliding_tb0 interval(10a) sliding(10a) order by ts desc; +if $row != 10000 then + return -1 +endi + +if $data00 != @00-01-01 00:04:59.970@ then + return -1 +endi + +if $data01 != 0.000000000 then + return -1 +endi + +if $data02 != 1 then + return -1 +endi + +if $data03 != 99 then + return -1 +endi + +if $data04 != 99 then + return -1 +endi + +if $data90 != @00-01-01 00:04:59.700@ then + return -1 +endi + +if $data91 != 0.000000000 then + return -1 +endi + +if $data92 != 1 then + return -1 +endi + +if $data93 != 90 then + return -1 +endi + +if $data94 != 90 then + return -1 +endi + +sql select count(c2),last(c4) from sliding_tb0 interval(30s) sliding(10s) order by ts asc; +if $row != 30 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data02 != 99 then + return -1 +endi + +sql select count(c2),stddev(c3),first(c4),last(c4) from sliding_tb0 where ts>'2000-01-01 0:0:0' and ts<'2000-1-1 0:0:31' interval(30s) sliding(30s) order by ts asc; +if $row != 2 then + return -1 +endi + +if $data04 != 99 then + return -1 +endi + +if $data01 != 999 then + return -1 +endi + +if $data02 != 28.837977152 then + return -1 
+endi + +#interval offset + limit +sql select count(c2), first(c3),stddev(c4) from sliding_tb0 interval(10a) sliding(10a) order by ts desc limit 10 offset 990; +if $row != 10 then + return -1 +endi + +if $data00 != @00-01-01 00:04:30.270@ then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data02 != 9 then + return -1 +endi + +if $data03 != 0.000000000 then + return -1 +endi + +if $data90 != @00-01-01 00:04:30.000@ then + return -1 +endi + +if $data91 != 1 then + return -1 +endi + +if $data92 != 0 then + return -1 +endi + +if $data93 != 0.000000000 then + return -1 +endi + +#interval offset test +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(30s) order by ts asc limit 1000 offset 1; +if $row != 9 then + return -1 +endi + +if $data00 != @00-01-01 00:00:30.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data02 != 99 then + return -1 +endi + +if $data80 != @00-01-01 00:04:30.000@ then + return -1 +endi + +if $data81 != 1000 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 where ts>'2000-1-1 0:0:0' and ts<'2000-1-1 0:0:31' interval(30s) sliding(30s) order by ts asc limit 1000 offset 0; +if $row != 2 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 999 then + return -1 +endi + +if $data02 != 99 then + return -1 +endi + +if $data03 != 28.837977152 then + return -1 +endi + +if $data10 != @00-01-01 00:00:30.000@ then + return -1 +endi + +if $data11 != 34 then + return -1 +endi + +if $data12 != 33 then + return -1 +endi + +if $data13 != 9.810708435 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 1; +if $row != 14 then + return -1 +endi + +if $data00 != @00-01-01 00:00:20.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data02 != 66 then + return -1 +endi + +if $data03 != 
28.866070048 then + return -1 +endi + +if $data90 != @00-01-01 00:03:20.000@ then + return -1 +endi + +if $data91 != 1000 then + return -1 +endi + +if $data92 != 66 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 14; +if $row != 1 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) order by ts desc; +if $row != 10 then + return -1 +endi + +#00-01-01 00:04:30.000| 10| 0| 0.000000000| 0.000000000| +if $data00 != @00-01-01 00:04:30.000@ then + return -1 +endi + +if $data01 != 10 then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data03 != 0.000000000 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 15; +if $row != 0 then + return -1 +endi + +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts asc limit 1000; +if $row != 100 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data02 != 9.521904571 then + return -1 +endi + +if $data05 != 33 then + return -1 +endi + +if $data10 != @00-01-01 00:00:00.010@ then + return -1 +endi + +if $data12 != 9.521904571 then + return -1 +endi + +if $data15 != 33 then + return -1 +endi + +if $data95 != 33 then + return -1 +endi + +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts desc limit 1000; +if $row != 100 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.990@ then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data02 != 0.000000000 then + return -1 +endi + +if $data03 != 1 then + return -1 
+endi + +if $data90 != @00-01-01 00:00:00.900@ then + return -1 +endi + +if $data91 != 4 then + return -1 +endi + +if $data92 != 1.118033989 then + return -1 +endi + +if $data93 != 4 then + return -1 +endi + +if $data94 != 30.00000 then + return -1 +endi + +print check boundary check crash at client side +sql select count(*) from sliding_mt0 where ts>now and ts < now-1h; + +print ========================query on super table + +print ========================error case +sql_error select sum(c1) from sliding_tb0 interval(1a) sliding(1a); +sql_error select sum(c1) from sliding_tb0 interval(10a) sliding(12a); +sql_error select sum(c1) from sliding_tb0 sliding(1n) interval(1y); +sql_error select sum(c1) from sliding_tb0 interval(-1y) sliding(1n); +sql_error select sum(c1) from sliding_tb0 interval(1y) sliding(-1n); +sql_error select sum(c1) from sliding_tb0 interval(0) sliding(0); +sql_error select sum(c1) from sliding_tb0 interval(0m) sliding(0m); +sql_error select sum(c1) from sliding_tb0 interval(m) sliding(m); +sql_error select sum(c1) from sliding_tb0 sliding(4m); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/slimit_alter_tags.sim b/tests/script/general/parser/slimit_alter_tags.sim index e8e81e8809..fb48dddba7 100644 --- a/tests/script/general/parser/slimit_alter_tags.sim +++ b/tests/script/general/parser/slimit_alter_tags.sim @@ -141,9 +141,9 @@ $res = 3 * $rowNum if $data00 != $res then return -1 endi -if $data01 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data01 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data02 != 3 then return -1 endi @@ -154,9 +154,9 @@ $res = 3 * $rowNum if $data10 != $res then return -1 endi -if $data11 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data11 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data15 != 2 then return -1 endi @@ -223,9 +223,9 @@ $res = 3 * $rowNum if $data00 != $res then return -1 endi -if $data01 != @18-09-17 09:00:00.000@ then - 
return -1 -endi +#if $data01 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data02 != 3 then return -1 endi @@ -236,9 +236,9 @@ $res = 3 * $rowNum if $data10 != $res then return -1 endi -if $data11 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data11 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data15 != 2 then return -1 endi diff --git a/tests/script/general/parser/stream.sim b/tests/script/general/parser/stream.sim deleted file mode 100644 index 2f233b6189..0000000000 --- a/tests/script/general/parser/stream.sim +++ /dev/null @@ -1,212 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 5 -system sh/exec.sh -n dnode1 -s start -sleep 3000 -sql connect -print ======================== stream.sim -sleep 2000 -$db = strm_db -$tb = tb -$mt = mt -$strm = strm -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== set up DB -$i = 0 - -sql drop database if exists $db -sql create database $db -sql use $db - - -## [TBASE300] -print ====== TBASE-300 -sql create table mt (ts timestamp, c1 int, c2 int) tags(t1 int) -sql create table tb1 using mt tags(1) -sql create table tb2 using mt tags(2) -sql create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s) -sleep 10000 -sql insert into tb2 values(now,1,1) -sql insert into tb1 values(now,1,1) -sleep 4000 -sql select * from mt -sql select * from strm -sql drop table tb1 -sleep 100000 -sql select * from strm -if $rows != 2 then - if $rows != 1 then - return -1 - endi -endi -sql drop table tb2 -sql drop table mt -sql drop table strm - -## [TBASE304] -print ====== TBASE-304 -sleep 10000 -# we cannot reset query cache in server side, as a workaround, -# set super table name to mt304, need to change back to mt later -print create mt304 -sql create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int) -print 
create tb1 -sql create table tb1 using mt304 tags(1, 1) -print create tb2 -sql create table tb2 using mt304 tags(1, -1) -print create strm -sql create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s) -sql insert into tb1 values (now,1) -sql insert into tb2 values (now,2) -sleep 100000 -sql select * from strm; -if $rows != 2 then - print ==== expect rows = 2, actually returned rows = $rows - return -1 -endi -if $data01 != 1 then - return -1 -endi -print data02 = $data02 -if $data02 != 1.000000000 then - return -1 -endi -sql alter table mt304 drop tag t2; -sql insert into tb2 values (now,2); -sql insert into tb1 values (now,1); -sql select * from strm; -sql alter table mt304 add tag t2 int; -sleep 10000 -sql select * from strm - -print ================= create a stream with a wildcard filter on tags of a STable -sql drop database $db -sql create database $db -sql use $db -sql create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10)) -sql create table tb1 using stb tags('a1') -sql create table tb2 using stb tags('b2') -sql create table tb3 using stb tags('a3') -sql create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s) -sleep 11000 -sql insert into tb1 values (now, 0, 'tb1') -sleep 4000 -sql insert into tb2 values (now, 2, 'tb2') -sleep 4000 -sql insert into tb3 values (now, 0, 'tb3') -sleep 60000 - -sql describe strm -if $rows == 0 then - return -1 -endi - -sql select * from strm -sleep 1000 -print ======== data0: $data00 $data01 $data02 $data03 -print ======== data1: $data10 $data11 $data12 $data13 -print ======== data2: $data20 $data21 $data22 $data23 -print ======== data3: $data30 $data31 $data32 $data33 -if $rows != 4 then - print ==== expect rows = 4, actually returned rows = $rows - return -1 -endi -if $data02 != 0.000000000 then - return -1 -endi -if $data03 == tb2 then - return -1 -endi -if $data13 == tb2 then - return -1 -endi -if $data23 == tb2 
then - return -1 -endi -if $data33 == tb2 then - return -1 -endi - -## The vnode client needs to refresh metadata cache to allow strm calculate tb4's data. But the current refreshing frequency is every 10 min -## commented out the case below to save running time -sql create table tb4 using stb tags('a4') -sql insert into tb4 values(now, 4, 'tb4') -sleep 60000 -sql select * from strm order by ts desc -print ======== data0: $data00 $data01 $data02 $data03 -#print ======== data1: $data10 $data11 $data12 $data13 -#print ======== data2: $data20 $data21 $data22 $data23 -#print ======== data3: $data30 $data31 $data32 $data33 -if $rows != 6 then - print ==== expect rows = 6, actually returned rows = $rows - return -1 -endi -if $data02 != 4.000000000 then - return -1 -endi -if $data03 != tb4 then - return -1 -endi - -print =============== change tag values to see if stream still works correctly -sql alter table tb4 set tag t1='b4' -sleep 3000 # waiting for new tag valid -sql insert into tb1 values (now, 1, 'tb1_a1') -sleep 4000 -sql insert into tb4 values (now, -4, 'tb4_b4') -sleep 100000 -sql select * from strm order by ts desc -sleep 1000 -print ======== data0: $data00 $data01 $data02 $data03 -#print ======== data1: $data10 $data11 $data12 $data13 -#print ======== data2: $data20 $data21 $data22 $data23 -#print ======== data3: $data30 $data31 $data32 $data33 -if $rows != 8 then - print ==== expect rows = 8, actually returned rows = $rows - return -1 -endi -if $data02 != 1.000000000 then - return -1 -endi -if $data03 != tb1_a1 then - return -1 -endi - -sql drop database if exists $db -sql drop database if exists strm_db_0 -sql create database strm_db_0 -sql use strm_db_0 - -sql create table stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15)) -sql create table tb0 using stb tags(0, 'tb0') -sql create table tb1 using stb tags(1, 'tb1') -sql create table tb2 using stb tags(2, 'tb2') -sql create table tb3 
using stb tags(3, 'tb3') -sql create table tb4 using stb tags(4, 'tb4') - -sql create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb where ts < now + 30s interval(4s) sliding(2s) -sleep 1000 -sql insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) -sleep 30000 -sql select * from strm0 order by ts desc -sleep 1000 -if $rows != 2 then - print ==== expect rows = 2, actually returned rows = $rows - return -1 -endi - -sql insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true) -sleep 30000 -sql select * from strm0 order by ts desc -sleep 10000 -if $rows == 4 then - print ==== actually returned rows = $rows, expect always not equal to 4 - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/tags_dynamically_specifiy.sim b/tests/script/general/parser/tags_dynamically_specifiy.sim index 6e1766a91d..0a5d5c9716 100644 --- a/tests/script/general/parser/tags_dynamically_specifiy.sim +++ b/tests/script/general/parser/tags_dynamically_specifiy.sim @@ -28,16 +28,27 @@ sql insert into tb3 using stb (t3) tags (3.3) values ( now + 3s, 'binary3', 3 sql insert into tb4 (ts, c1, c2) using stb (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4', 4) sql insert into tb5 (ts, c1, c3) using stb (t1, t3) tags ('tag5', 11.11) values ( now + 5s, 'binary5', 5.5) - +sql insert into tb6 (ts, c1, c3) using stb tags ('tag5', 6, 11.11) values ( now + 5s, 'binary6', 6.6) +sql insert into tb7 (ts, c1, c2, c3) using stb tags ('tag5', 7, 11.11) 
values ( now + 5s, 'binary7', 7, 7.7) sql select * from stb order by ts asc -if $rows != 5 then +if $rows != 7 then return -1 endi +sql_error insert into tb11 using stb (t1) tags () values ( now + 1s, 'binary1', 1, 1.1) +sql_error insert into tb12 using stb (t1, t3) tags () values ( now + 1s, 'binary1', 1, 1.1) +sql_error insert into tb13 using stb (t1, t2, t3) tags (8, 9.13, 'ac') values ( now + 1s, 'binary1', 1, 1.1) +sql_error insert into tb14 using stb () tags (2) values ( now + 2s, 'binary2', 2, 2.2) +sql_error insert into tb15 using stb (t2, t3) tags (3.3) values ( now + 3s, 'binary3', 3, 3.3) +sql_error insert into tb16 (ts, c1, c2) using stb (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4') +sql_error insert into tb17 (ts, c1, c3) using stb (t1, t3) tags ('tag5', 11.11, 5) values ( now + 5s, 'binary5', 5.5) +sql_error insert into tb18 (ts, c1, c3) using stb tags ('tag5', 16) values ( now + 5s, 'binary6', 6.6) +sql_error insert into tb19 (ts, c1, c2, c3) using stb tags (19, 'tag5', 91.11) values ( now + 5s, 'binary7', 7, 7.7) + sql create table stbx (ts timestamp, c1 binary(10), c2 int, c3 float) tags (t1 binary(10), t2 int, t3 float) -sql insert into tb100 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag100', 100, 100.9) values ( now + 10s, 'binary100', 100, 100.9) tb101 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag101', 101, 101.9) values ( now + 10s, 'binary101', 101, 101.9) tb102 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag102', 102, 102.9) values ( now + 10s, 'binary102', 102, 102.9) +sql insert into tb100 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag100', 100, 100.123456) values ( now + 10s, 'binary100', 100, 100.9) tb101 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag101', 101, 101.9) values ( now + 10s, 'binary101', 101, 101.9) tb102 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag102', 102, 102.9) values ( now + 10s, 'binary102', 102, 102.9) sql select * from stbx if $rows != 3 then @@ -52,9 +63,38 @@ if $data05 != 
100 then return -1 endi -#if $data06 != 100.90000 then -# print "expect: 100.90000, act: $data06" -# return -1 -#endi +if $data06 != 100.12346 then + print "expect: 100.12346, act: $data06" + return -1 +endi + +sql create table stby (ts timestamp, c1 binary(10), c2 int, c3 float) tags (t1 binary(10), t2 int, t3 float) +sql reset query cache +sql insert into tby1 using stby (t1) tags ('tag1') values ( now + 1s, 'binary1', 1, 1.1) +sql insert into tby2 using stby (t2) tags (2) values ( now + 2s, 'binary2', 2, 2.2) +sql insert into tby3 using stby (t3) tags (3.3) values ( now + 3s, 'binary3', 3, 3.3) +sql insert into tby4 (ts, c1, c2) using stby (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4', 4) +sql insert into tby5 (ts, c1, c3) using stby (t1, t3) tags ('tag5', 11.11) values ( now + 5s, 'binary5', 5.5) +sql insert into tby6 (ts, c1, c3) using stby tags ('tag5', 6, 11.11) values ( now + 5s, 'binary6', 6.6) +sql insert into tby7 (ts, c1, c2, c3) using stby tags ('tag5', 7, 11.11) values ( now + 5s, 'binary7', 7, 7.7) +sql select * from stby order by ts asc +if $rows != 7 then + return -1 +endi + +sql reset query cache +sql insert into tby1 using stby (t1) tags ('tag1') values ( now + 1s, 'binary1y', 1, 1.1) +sql insert into tby2 using stby (t2) tags (2) values ( now + 2s, 'binary2y', 2, 2.2) +sql insert into tby3 using stby (t3) tags (3.3) values ( now + 3s, 'binary3y', 3, 3.3) +sql insert into tby4 (ts, c1, c2) using stby (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4y', 4) +sql insert into tby5 (ts, c1, c3) using stby (t1, t3) tags ('tag5', 11.11) values ( now + 5s, 'binary5y', 5.5) +sql insert into tby6 (ts, c1, c3) using stby tags ('tag5', 6, 11.11) values ( now + 5s, 'binary6y', 6.6) +sql insert into tby7 (ts, c1, c2, c3) using stby tags ('tag5', 7, 11.11) values ( now + 5s, 'binary7y', 7, 7.7) + +sql select * from stby order by ts asc +if $rows != 14 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file 
diff --git a/tests/script/general/parser/tags_filter.sim b/tests/script/general/parser/tags_filter.sim index 42e8c8f9f9..9842b4fda6 100644 --- a/tests/script/general/parser/tags_filter.sim +++ b/tests/script/general/parser/tags_filter.sim @@ -23,7 +23,7 @@ sql create table stb (ts timestamp, c1 int) tags (t1 binary(10)) sql create table tb1 using stb tags('*') sql create table tb2 using stb tags('%') sql create table tb3 using stb tags('') -sql create table tb4 using stb tags('/'') +sql create table tb4 using stb tags('\'') sql insert into tb1 values ( $ts0 , 1) sql insert into tb2 values ( $ts0 , 2) @@ -54,7 +54,7 @@ if $data01 != 3 then return -1 endi -sql select * from stb where t1 = '/'' +sql select * from stb where t1 = '\'' if $rows != 1 then return -1 endi @@ -70,9 +70,8 @@ if $data01 != 1 then return -1 endi -sql_error select * from stb where t1 > 1 -sql_error select * from stb where t1 > '1' -sql_error select * from stb where t1 > 'a' +sql select * from stb where t1 > '1' +sql select * from stb where t1 > 'a' ## wildcard '%' #sql select * from stb where t1 like '%' @@ -91,7 +90,7 @@ if $data01 != 3 then return -1 endi -sql select * from stb where t1 like '/'' +sql select * from stb where t1 like '\'' if $rows != 1 then return -1 endi @@ -104,3 +103,50 @@ sql show databases if $rows != 0 then return -1 endi + +print ============tbase-1328 + +sql drop database if exists testselectwheretags; +sql CREATE DATABASE IF NOT EXISTS testselectwheretags; +sql USE testselectwheretags; +sql CREATE TABLE IF NOT EXISTS st1 (ts TIMESTAMP, v1 INT, v2 FLOAT, v3 BOOL) TAGS (farm NCHAR(2), period NCHAR(2), line NCHAR(2), unit INT); +sql CREATE TABLE IF NOT EXISTS a01 USING st1 TAGS ('2', 'c', '2', 2); +sql CREATE TABLE IF NOT EXISTS a02 USING st1 TAGS ('1', 'c', 'a', 1); +sql CREATE TABLE IF NOT EXISTS a03 USING st1 TAGS ('1', 'c', '02', 1); +sql INSERT INTO a01 VALUES (1574872693209, 3, 3.000000, 1); +sql INSERT INTO a02 VALUES (1574872683933, 2, 2.000000, 1); +sql INSERT 
INTO a03 VALUES (1574872683933, 2, 2.000000, 1); + +sql select * from st1 where line='02'; +if $rows != 1 then + return -1 +endi + +sql CREATE TABLE IF NOT EXISTS st2 (ts TIMESTAMP, v1 INT, v2 FLOAT) TAGS (farm BINARY(2), period BINARY(2), line BINARY(2)); + +sql CREATE TABLE IF NOT EXISTS b01 USING st2 TAGS ('01', '01', '01'); +sql CREATE TABLE IF NOT EXISTS b02 USING st2 TAGS ('01', '01', '01'); +sql CREATE TABLE IF NOT EXISTS b03 USING st2 TAGS ('01', '02', '01'); +sql CREATE TABLE IF NOT EXISTS b04 USING st2 TAGS ('01', '01', '02'); + +sql INSERT INTO b03 VALUES (1576043322749, 3, 3.000000); +sql INSERT INTO b03 VALUES (1576043323596, 3, 3.000000); + +sql INSERT INTO b02 VALUES (1576043315169, 2, 2.000000); +sql INSERT INTO b02 VALUES (1576043316295, 2, 2.000000); +sql INSERT INTO b02 VALUES (1576043317167, 2, 2.000000); + +sql INSERT INTO b01 VALUES (1576043305972, 1, 1.000000); +sql INSERT INTO b01 VALUES (1576043308513, 1, 1.000000); + +sql select * from st2 where period='02'; +if $rows != 2 then + return -1 +endi + +sql select sum(v2) from st2 group by farm,period,line; +if $rows != 2 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim new file mode 100644 index 0000000000..14b6c97b7c --- /dev/null +++ b/tests/script/general/parser/union.sim @@ -0,0 +1,411 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c debugFlag -v 135 +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/exec.sh -n dnode1 -s start +sleep 1000 +sql connect + +$dbPrefix = union_db +$tbPrefix = union_tb +$tbPrefix1 = union_tb_ +$mtPrefix = union_mt +$tbNum = 10 +$rowNum = 10000 +$totalNum = $tbNum * $rowNum + +print =============== union.sim +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +$j = 1 + +$mt1 = $mtPrefix . 
$j + +sql drop database if exists $db -x step1 +step1: +sql create database if not exists $db maxtables 4 +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$i = 0 +$t = 1578203484000 + +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$j = 0 +$t = 1578203484000 +$rowNum = 1000 +$tbNum = 5 +$i = 0 + +while $i < $tbNum + $tb1 = $tbPrefix1 . $j + sql create table $tb1 using $mt1 tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 + $j = $j + 1 +endw + +print sleep 1sec. +sleep 1000 + +$i = 1 +$tb = $tbPrefix . 
$i + +## column type not identical +sql_error select count(*) as a from union_mt0 union all select avg(c1) as a from union_mt0 +sql_error select count(*) as a from union_mt0 union all select spread(c1) as a from union_mt0; + +## union not supported +sql_error (select count(*) from union_mt0) union (select count(*) from union_mt0); + +## column type not identical +sql_error select c1 from union_mt0 limit 10 union all select c2 from union_tb1 limit 20; + +## union not support recursively union +sql_error select c1 from union_tb0 limit 2 union all (select c1 from union_tb1 limit 1 union all select c1 from union_tb3 limit 2); +sql_error (select c1 from union_tb0 limit 1 union all select c1 from union_tb1 limit 1) union all (select c1 from union_tb0 limit 10 union all select c1 from union_tb1 limit 10); + +# union as subclause +sql_error (select c1 from union_tb0 limit 1 union all select c1 from union_tb1 limit 1) limit 1 + +# sql with parenthese +sql (((select c1 from union_tb0))) +if $rows != 10000 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +# mixed order +sql select ts, c1 from union_tb1 order by ts asc limit 10 union all select ts, c1 from union_tb0 order by ts desc limit 2 union all select ts, c1 from union_tb2 order by ts asc limit 10 +if $rows != 22 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:52:24.000@ then + return -1 +endi + +if $data11 != 1 then + return -1 +endi + +if $data90 != @20-01-05 14:00:24.000@ then + return -1 +endi + +if $data91 != 9 then + return -1 +endi + +# different sort order + +# super table & normal table mixed up +sql select c3 from union_tb0 limit 2 union all select sum(c1) as c3 from union_mt0; +if $rows != 3 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data20 != 4950000 then + return -1 
+endi + +# type compatible +sql select c3 from union_tb0 limit 2 union all select sum(c1) as c3 from union_tb1; +if $rows != 3 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data20 != 495000 then + return -1 +endi + +# two join subclause +sql select count(*) as c from union_tb0, union_tb1 where union_tb0.ts=union_tb1.ts union all select union_tb0.c3 as c from union_tb0, union_tb1 where union_tb0.ts=union_tb1.ts limit 10 +if $rows != 11 then + return -1 +endi + +if $data00 != 10000 then + return -1 +endi + +if $data10 != 0 then + return -1 +endi + +if $data20 != 1 then + return -1 +endi + +if $data90 != 8 then + return -1 +endi + +print ===========================================tags union +# two super table tag union, limit is not active during retrieve tags query +sql select t1 from union_mt0 union all select t1 from union_mt0 limit 1 +if $rows != 20 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data90 != 9 then + return -1 +endi + +#========================================== two super table join subclause +print ================two super table join subclause +sql select avg(union_mt0.c1) as c from union_mt0 interval(1h) limit 10 union all select union_mt1.ts, union_mt1.c1/1.0 as c from union_mt0, union_mt1 where union_mt1.ts=union_mt0.ts and union_mt1.t1=union_mt0.t1 limit 5; +print the rows value is: $rows + +if $rows != 15 then + return -1 +endi + +# first subclause are empty +sql select count(*) as c from union_tb0 where ts>now+10y union all select sum(c1) as c from union_tb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 495000 then + return -1 +endi + +# all subclause are empty +sql select c1 from union_tb0 limit 0 union all select c1 from union_tb1 where ts>'2021-1-1 0:0:0' +if $rows != 0 then + return -1 +endi + +# middle subclause empty +sql select c1 from union_tb0 limit 1 union all select c1 from union_tb1 where ts>'2030-1-1 0:0:0' union all select 
last(c1) as c1 from union_tb1; +if $rows != 2 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 99 then + return -1 +endi + +# multi-vnode projection query +sql (select c1 from union_mt0) union all select c1 from union_mt0; +if $rows != 200000 then + return -1 +endi + +# multi-vnode projection query + limit +sql (select ts, c1 from union_mt0 limit 1) union all (select ts, c1 from union_mt0 limit 1); +if $rows != 2 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data11 != 0 then + return -1 +endi + +# two aggregated functions for super tables +sql select sum(c1) as a from union_mt0 interval(1s) limit 9 union all select ts, max(c3) as a from union_mt0 limit 2; +if $rows != 10 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:52:24.000@ then + return -1 +endi + +if $data11 != 10 then + return -1 +endi + +if $data20 != @20-01-05 13:53:24.000@ then + return -1 +endi + +if $data21 != 20 then + return -1 +endi + +if $data90 != @20-01-05 15:30:24.000@ then + return -1 +endi + +if $data91 != 99 then + return -1 +endi + +#1111111111111111111111111111111111111111111111111 +# two aggregated functions for normal tables +sql select sum(c1) as a from union_tb0 limit 1 union all select sum(c3) as a from union_tb1 limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != 495000 then + return -1 +endi + +if $data10 != 495000 then + return -1 +endi + +# two super table query + interval + limit +sql select ts, first(c3) as a from union_mt0 limit 1 union all select sum(c3) as a from union_mt0 interval(1h) limit 1; +if $rows != 2 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 
13:00:00.000@ then + return -1 +endi + +if $data11 != 360 then + return -1 +endi + +sql select server_status() union all select server_status() +if $rows != 2 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +sql select client_version() union all select server_version() +if $rows != 2 then + return -1 +endi + +sql select database() union all select database() +if $rows != 2 then + return -1 +endi + +if $data00 != @union_db0@ then + return -1 +endi + +if $data10 != @union_db0@ then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 4d86b50f38..710156a4ff 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -277,4 +277,44 @@ if $rows != 2 then return -1 endi +print ==========tbase-1363 +#sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$i = 0 +while $i < 1 + $tb = test_null_filter + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < 10000 + $ms = $x . m + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . 
' + sql insert into $tb values (now + $ms , null , null , null , null , null , null , null , null , null ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 2000 + +system sh/exec.sh -n dnode1 -s start + +sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter'); +if $row != 0 then + return -1 +endi + +sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter'); +if $row != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/stream/metrics_1.sim b/tests/script/general/stream/metrics_1.sim deleted file mode 100644 index 94498cb925..0000000000 --- a/tests/script/general/stream/metrics_1.sim +++ /dev/null @@ -1,287 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = m1_db -$tbPrefix = m1_tb -$mtPrefix = m1_mt -$stPrefix = m1_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 0 -$mt = $mtPrefix . $i - -sql select count(*) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . 
c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -print create table $st as select avg(tbcol) from $mt interval(1d) -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $mt interval(1d) -x step11 - return -1 -step11: - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $mt interval(1d) -x step12 - return -1 -step12: - -print =============== step13 -sql select top(tbcol, 1) from $mt interval(1d) - -print =============== step14 - -sql select bottom(tbcol, 1) from $mt interval(1d) - -print =============== step15 pe - -sql select percentile(tbcol, 1) from $mt interval(1d) -x step15 - return -1 -step15: - -print =============== step16 -sql select diff(tbcol) from $mt interval(1d) -x step16 - return -1 -step16: - -print =============== step17 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step18 as -sql select count(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step19 gb -sql select count(tbcol) from $mt interval(1d) group by tgcol -print ===> $data00 $data01 -if $data01 != 20 then - return -1 -endi - -print =============== step20 x -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) group by tgcol -print ===> $data00 $data01 -if $data01 != 20 then - return -1 -endi - -print =============== step21 -print sleep 120 seconds -sleep 120000 - -print =============== step22 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . 
c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -#$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/general/stream/new_stream.sim b/tests/script/general/stream/new_stream.sim deleted file mode 100644 index 001602079b..0000000000 --- a/tests/script/general/stream/new_stream.sim +++ /dev/null @@ -1,106 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect -print ======================== dnode1 start - -$dbPrefix = ns_db -$tbPrefix = ns_tb -$mtPrefix = ns_mt -$stPrefix = ns_st -$tbNum = 5 -$rowNum = 200 -$totalNum = 200 - -print =============== step1 - -$i = 0 -$db = $dbPrefix -$mt = $mtPrefix -$st = $stPrefix - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(10s) -print $data00 $data01 $data02 $data03 - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(10s) - -print =============== step3 -print sleep 120 seconds -sleep 120000 - -print =============== step4 - -sql select * from $st -print $st ==> $rows1 $data00 $data01 $data02 $data03 -if $data03 >= 51 then - return -1 -endi - -print =============== step5 - -$tbNum = 10 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . 
s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -print =============== step6 -print sleep 120 seconds -sleep 120000 - -print =============== step7 - -sql select * from $st order by ts desc -print $st ==> $rows1 $data00 $data01 $data02 $data03 -if $data03 <= 51 then - return -1 -endi - - - - - diff --git a/tests/script/general/stream/stream_1.sim b/tests/script/general/stream/stream_1.sim deleted file mode 100644 index 958c877ee5..0000000000 --- a/tests/script/general/stream/stream_1.sim +++ /dev/null @@ -1,206 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = s1_db -$tbPrefix = s1_tb -$mtPrefix = s1_mt -$stPrefix = s1_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 10 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step3 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi -if $data02 != 20 then - return -1 -endi -if $data03 != 20 then - return -1 -endi - -print =============== step4 -sql drop table $st -sql show tables -if $rows != 10 then - return -1 -endi - -print =============== step5 -sql select * from $st -x step4 - return -1 -step4: - -print =============== step6 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step7 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi -if $data02 != 20 then - return -1 -endi -if $data03 != 20 then - return -1 -endi - -print =============== step8 -$i = 1 -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -sql show tables -if $rows != 11 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step9 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -print =============== step10 -sql drop table $st -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step11 -sql select * from $st -x step10 - return -1 -step10: - -print =============== step12 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step13 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - diff --git a/tests/script/general/stream/stream_2.sim b/tests/script/general/stream/stream_2.sim deleted file mode 100644 index 057529b427..0000000000 --- a/tests/script/general/stream/stream_2.sim +++ /dev/null @@ -1,194 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = s2_db -$tbPrefix = s2_tb -$mtPrefix = s2_mt -$stPrefix = s2_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print 
=============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i - -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 10 then - return -1 -endi - -sql create table $st as select count(tbcol) from $tb interval(1d) - -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step3 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi - -print =============== step4 -sql drop table $st -sql show tables -if $rows != 10 then - return -1 -endi - -print =============== step5 -sql select * from $st -x step4 - return -1 -step4: - -print =============== step6 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step7 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi -if $data02 != 20 then - return -1 -endi -if $data03 != 20 then - return -1 -endi - -print =============== step8 -$i = 1 -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -sql show tables -if $rows != 11 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step9 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -print =============== step10 -sql drop table $st -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step11 -sql select * from $st -x step10 - return -1 -step10: - -print =============== step12 -sql create table $st as select count(tbcol) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step13 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != null then - return -1 -endi -if $data03 != null then - return -1 -endi - diff --git a/tests/script/general/stream/testSuite.sim b/tests/script/general/stream/testSuite.sim index 046348b123..44f886d02b 100644 --- a/tests/script/general/stream/testSuite.sim +++ b/tests/script/general/stream/testSuite.sim @@ -1,9 +1,6 @@ -run general/stream/stream_1.sim -run general/stream/stream_2.sim run general/stream/stream_3.sim run general/stream/stream_restart.sim run general/stream/table_1.sim -run general/stream/metrics_1.sim run general/stream/table_n.sim run general/stream/metrics_n.sim run general/stream/table_del.sim diff --git 
a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 3c4733a25b..c86b17c4fc 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -1,10 +1,10 @@ cd ../../../debug; cmake .. cd ../../../debug; make -#./test.sh -f general/alter/cached_schema_after_alter.sim +./test.sh -f general/alter/cached_schema_after_alter.sim ./test.sh -f general/alter/count.sim ./test.sh -f general/alter/import.sim -#./test.sh -f general/alter/insert1.sim +./test.sh -f general/alter/insert1.sim ./test.sh -f general/alter/insert2.sim ./test.sh -f general/alter/metrics.sim ./test.sh -f general/alter/table.sim @@ -117,6 +117,7 @@ cd ../../../debug; make ./test.sh -f general/parser/import_commit3.sim ./test.sh -f general/parser/insert_tb.sim ./test.sh -f general/parser/first_last.sim +# dyh is processing this script #./test.sh -f general/parser/import_file.sim ./test.sh -f general/parser/lastrow.sim ./test.sh -f general/parser/nchar.sim @@ -135,6 +136,7 @@ cd ../../../debug; make ./test.sh -f general/parser/limit2.sim ./test.sh -f general/parser/fill.sim ./test.sh -f general/parser/fill_stb.sim +#./test.sh -f general/parser/fill_us.sim ./test.sh -f general/parser/where.sim ./test.sh -f general/parser/slimit.sim ./test.sh -f general/parser/select_with_tags.sim @@ -142,15 +144,15 @@ cd ../../../debug; make ./test.sh -f general/parser/tags_dynamically_specifiy.sim ./test.sh -f general/parser/groupby.sim ./test.sh -f general/parser/set_tag_vals.sim -#./test.sh -f general/parser/slimit_alter_tags.sim +#./test.sh -f general/parser/sliding.sim +./test.sh -f general/parser/tags_dynamically_specifiy.sim +./test.sh -f general/parser/tags_filter.sim +./test.sh -f general/parser/slimit_alter_tags.sim ./test.sh -f general/parser/join.sim ./test.sh -f general/parser/join_multivnode.sim ./test.sh -f general/parser/binary_escapeCharacter.sim ./test.sh -f general/parser/bug.sim -#./test.sh -f general/parser/stream_on_sys.sim -./test.sh -f general/parser/stream.sim 
./test.sh -f general/parser/repeatAlter.sim -#./test.sh -f general/parser/repeatStream.sim ./test.sh -f general/stable/disk.sim ./test.sh -f general/stable/dnode3.sim @@ -201,7 +203,7 @@ cd ../../../debug; make ./test.sh -f general/tag/bool.sim ./test.sh -f general/tag/change.sim ./test.sh -f general/tag/column.sim -#./test.sh -f general/tag/commit.sim +./test.sh -f general/tag/commit.sim ./test.sh -f general/tag/create.sim ./test.sh -f general/tag/delete.sim ./test.sh -f general/tag/double.sim @@ -309,14 +311,14 @@ cd ../../../debug; make ./test.sh -f unique/vnode/replica3_repeat.sim ./test.sh -f unique/vnode/replica3_vgroup.sim -./test.sh -f general/stream/metrics_1.sim +# stream still has bugs +#./test.sh -f general/parser/stream_on_sys.sim +#./test.sh -f general/parser/repeatStream.sim + ./test.sh -f general/stream/metrics_del.sim ./test.sh -f general/stream/metrics_n.sim ./test.sh -f general/stream/metrics_replica1_vnoden.sim -#./test.sh -f general/stream/new_stream.sim ./test.sh -f general/stream/restart_stream.sim -./test.sh -f general/stream/stream_1.sim -./test.sh -f general/stream/stream_2.sim ./test.sh -f general/stream/stream_3.sim ./test.sh -f general/stream/stream_restart.sim ./test.sh -f general/stream/table_1.sim @@ -334,6 +336,7 @@ cd ../../../debug; make ./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim ./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim ./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim +# lower the priority while file corruption #./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim #./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim #./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim diff --git a/tests/script/jenkins/simple.txt b/tests/script/jenkins/simple.txt index 135326af9e..2fe364bf26 100644 --- a/tests/script/jenkins/simple.txt +++ b/tests/script/jenkins/simple.txt @@ -148,7 +148,6 @@ cd ../../../debug; make ./test.sh -f 
general/parser/binary_escapeCharacter.sim ./test.sh -f general/parser/bug.sim #./test.sh -f general/parser/stream_on_sys.sim -./test.sh -f general/parser/stream.sim ./test.sh -f general/parser/repeatAlter.sim #./test.sh -f general/parser/repeatStream.sim diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index eb0a9b526d..9cd5b8e15f 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -110,6 +110,7 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG +echo "debugFlag 135" >> $TAOS_CFG echo "mDebugFlag 135" >> $TAOS_CFG echo "sdbDebugFlag 135" >> $TAOS_CFG echo "dDebugFlag 135" >> $TAOS_CFG diff --git a/tests/script/tmp/mnodes.sim b/tests/script/tmp/mnodes.sim index 67f3648f64..afc068b3f1 100644 --- a/tests/script/tmp/mnodes.sim +++ b/tests/script/tmp/mnodes.sim @@ -1,7 +1,120 @@ system sh/stop_dnodes.sh + system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode3 -c walLevel -v 2 + system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 20 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 20 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20 + +system sh/cfg.sh -n dnode1 -c http -v 1 +system sh/cfg.sh -n dnode2 -c http -v 1 +system sh/cfg.sh -n dnode3 -c http -v 1 + +system sh/cfg.sh -n dnode1 -c mDebugFlag -v 143 +system sh/cfg.sh -n dnode2 -c mDebugFlag -v 143 +system sh/cfg.sh -n dnode3 -c mDebugFlag -v 143 + +system sh/cfg.sh -n dnode1 -c sdbDebugFlag -v 
143 +system sh/cfg.sh -n dnode2 -c sdbDebugFlag -v 143 +system sh/cfg.sh -n dnode3 -c sdbDebugFlag -v 143 + +system sh/cfg.sh -n dnode1 -c sdebugFlag -v 143 +system sh/cfg.sh -n dnode2 -c sdebugFlag -v 143 +system sh/cfg.sh -n dnode3 -c sdebugFlag -v 143 + +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/cfg.sh -n dnode2 -c rpcDebugFlag -v 135 +system sh/cfg.sh -n dnode3 -c rpcDebugFlag -v 135 + +system sh/cfg.sh -n dnode1 -c tsdbDebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c tsdbDebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c tsdbDebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c mqttDebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c mqttDebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c mqttDebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c cDebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c cDebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c cDebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c udebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c udebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c udebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131 + +print ============== deploy + +system sh/exec.sh -n dnode1 -s start +sleep 2001 +sql connect + +sql create dnode $hostname2 +sql create dnode $hostname3 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start + +print =============== step1 +$x = 0 +show1: + $x = $x + 1 + sleep 2000 + if $x == 5 then + return -1 + endi +sql show mnodes -x show1 +$mnode1Role = $data2_1 +print mnode1Role $mnode1Role +$mnode2Role = $data2_2 +print mnode2Role $mnode2Role +$mnode3Role = $data2_3 +print mnode3Role $mnode3Role + +if $mnode1Role != master then + goto show1 +endi +if $mnode2Role != slave then + goto show1 +endi +if $mnode3Role != slave then + goto show1 
+endi + +$x = 1 +show2: + +print =============== step $x +sql show mnodes +print $data0_1 $data2_1 +print $data0_2 $data2_2 +print $data0_3 $data2_3 + + +$x = $x + 1 +sleep 2000 +if $x == 1000 then + return -1 +endi + +goto show2 diff --git a/tests/script/unique/cluster/client1_0.sim b/tests/script/unique/cluster/client1_0.sim new file mode 100644 index 0000000000..6516fe58e4 --- /dev/null +++ b/tests/script/unique/cluster/client1_0.sim @@ -0,0 +1,56 @@ +$tblStart = 0 +$tblEnd = 10000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + + +print ====================== client1_0 create table end, start insert data ............ + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + print ====================== client1_0 insert data complete once ............ + endi +endw diff --git a/tests/script/unique/cluster/client1_1.sim b/tests/script/unique/cluster/client1_1.sim new file mode 100644 index 0000000000..dd1e68ab4f --- /dev/null +++ b/tests/script/unique/cluster/client1_1.sim @@ -0,0 +1,52 @@ +$tblStart = 10000 +$tblEnd = 20000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client1_2.sim b/tests/script/unique/cluster/client1_2.sim new file mode 100644 index 0000000000..4f87810e42 --- /dev/null +++ b/tests/script/unique/cluster/client1_2.sim @@ -0,0 +1,52 @@ +$tblStart = 20000 +$tblEnd = 30000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client1_3.sim b/tests/script/unique/cluster/client1_3.sim new file mode 100644 index 0000000000..04df0dfffc --- /dev/null +++ b/tests/script/unique/cluster/client1_3.sim @@ -0,0 +1,52 @@ +$tblStart = 30000 +$tblEnd = 40000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_0.sim b/tests/script/unique/cluster/client2_0.sim new file mode 100644 index 0000000000..75e4dccf7d --- /dev/null +++ b/tests/script/unique/cluster/client2_0.sim @@ -0,0 +1,52 @@ +$tblStart = 0 +$tblEnd = 10000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.001 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_1.sim b/tests/script/unique/cluster/client2_1.sim new file mode 100644 index 0000000000..4c0d755c04 --- /dev/null +++ b/tests/script/unique/cluster/client2_1.sim @@ -0,0 +1,52 @@ +$tblStart = 10000 +$tblEnd = 20000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_2.sim b/tests/script/unique/cluster/client2_2.sim new file mode 100644 index 0000000000..2f08facf38 --- /dev/null +++ b/tests/script/unique/cluster/client2_2.sim @@ -0,0 +1,52 @@ +$tblStart = 20000 +$tblEnd = 30000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_3.sim b/tests/script/unique/cluster/client2_3.sim new file mode 100644 index 0000000000..b83e5b6eaf --- /dev/null +++ b/tests/script/unique/cluster/client2_3.sim @@ -0,0 +1,52 @@ +$tblStart = 30000 +$tblEnd = 40000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/cluster_main.sim b/tests/script/unique/cluster/cluster_main.sim new file mode 100644 index 0000000000..21800da2d4 --- /dev/null +++ b/tests/script/unique/cluster/cluster_main.sim @@ -0,0 +1,597 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode2 -c walLevel -v 1 +system sh/cfg.sh -n dnode3 -c walLevel -v 1 +system sh/cfg.sh -n dnode4 -c walLevel -v 1 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode4 -c 
numOfTotalVnodes -v 4 + +system sh/cfg.sh -n dnode1 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode2 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode3 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode4 -c alternativeRole -v 0 + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 1000 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 1000 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 1000 + +system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator + +print ============== step0: start tarbitrator +system sh/exec_tarbitrator.sh -s start + +print ============== step1: start dnode1/dnode2/dnode3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sleep 3000 +sql connect +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +print ============== step2: create db1 with replica 3 +$db = db1 +print create database $db replica 3 +#sql create database $db replica 3 maxTables $totalTableNum +sql create database $db replica 3 +sql use $db + +print ============== step3: create stable stb1 +$stb = stb1 +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) + +print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5 +#run_back unique/cluster/client_test.sim +run_back unique/cluster/client1_0.sim +run_back unique/cluster/client1_1.sim +run_back unique/cluster/client1_2.sim +run_back unique/cluster/client1_3.sim +run_back unique/cluster/client2_0.sim +run_back unique/cluster/client2_1.sim +run_back unique/cluster/client2_2.sim +run_back unique/cluster/client2_3.sim +run_back unique/cluster/client3.sim +run_back unique/cluster/client4.sim + +sleep 20000 +wait_subsim_insert_data: +sql select count(*) from $stb +print 
data00 $data00 +if $data00 < 1 then + sleep 3000 + goto wait_subsim_insert_data +endi + + +print wait for a while to let clients start insert data +sleep 5000 + +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4 and add into cluster, then wait dnode4 ready +system sh/exec.sh -n dnode4 -s start +sql create dnode $hostname4 + +wait_dnode4_ready_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 4 then + sleep 2000 + goto wait_dnode4_ready_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +#$dnode4Status = $data4_4 + +if $loop_cnt == 0 then + $dnode4Status = $data4_4 +elif $loop_cnt == 1 then + $dnode4Status = $data4_6 +elif $loop_cnt == 2 then + $dnode4Status = $data4_8 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $dnode4Status != ready then + sleep 2000 + goto wait_dnode4_ready_0 +endi + + +print ============== step6: stop and drop dnode1, then remove data dir of dnode1 +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +$cnt = 0 +wait_dnode1_offline_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 4 then + sleep 2000 + goto wait_dnode1_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 + +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 + +if $loop_cnt == 0 then + $dnode1Status = $data4_1 +elif $loop_cnt == 1 then + $dnode1Status = $data4_5 +elif $loop_cnt == 2 then + 
$dnode1Status = $data4_7 +elif $loop_cnt == 3 then + $dnode1Status = $data4_9 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $dnode1Status != offline then + sleep 2000 + goto wait_dnode1_offline_0 +endi + + + +$cnt = 0 +wait_mnode1_offline_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +print show mnodes +sql show mnodes +if $rows != 3 then + sleep 2000 + goto wait_mnode1_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$mnode1Status = $data2_1 +$mnode2Status = $data2_2 +$mnode3Status = $data2_3 +$mnode4Status = $data2_4 + +if $loop_cnt == 0 then + $mnode1Status = $data2_1 +elif $loop_cnt == 1 then + $mnode1Status = $data2_5 +elif $loop_cnt == 2 then + $mnode1Status = $data2_7 +elif $loop_cnt == 3 then + $mnode1Status = $data2_9 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $mnode1Status != offline then + sleep 2000 + goto wait_mnode1_offline_0 +endi + + + +sql drop dnode $hostname1 +system rm -rf ../../../sim/dnode1/data + +$cnt = 0 +wait_mnode4_slave_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +print show mnodes +sql show mnodes +if $rows != 3 then + sleep 2000 + goto wait_mnode4_slave_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$mnode1Status = $data2_1 +$mnode2Status = $data2_2 +$mnode3Status = $data2_3 +$mnode4Status = $data2_4 + +if $loop_cnt == 0 then + $mnode4Status = $data2_4 +elif $loop_cnt == 1 then + $mnode4Status = $data2_6 +elif $loop_cnt == 2 then + $mnode4Status = $data2_8 +else then + print **** **** **** END loop cluster do (loop_cnt: 
$loop_cnt )**** **** **** **** + return +endi + +if $mnode4Status != slave then + sleep 2000 + goto wait_mnode4_slave_0 +endi + +print ============== step7: stop dnode2, waiting dnode4 +system sh/exec.sh -n dnode2 -s stop -x SIGINT + +$cnt = 0 +wait_dnode2_offline_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode2_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 + +if $dnode2Status != offline then + sleep 2000 + goto wait_dnode2_offline_0 +endi + +sleep 3000 +print show mnodes +sql show mnodes +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 + +print ============== step8: restart dnode2, then wait sync end +system sh/exec.sh -n dnode2 -s start + +$cnt = 0 +wait_dnode2_ready_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode2_ready_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 + +if $dnode2Status != ready then + sleep 2000 + goto wait_dnode2_ready_0 +endi + +sleep 3000 +print show mnodes +sql show mnodes +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 + +print 
============== step9: stop dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 3000 + +$cnt = 0 +wait_dnode3_offline_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 + +if $dnode3Status != offline then + sleep 2000 + goto wait_dnode3_offline_0 +endi + +print ============== step10: restart dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s start +sleep 3000 + +$cnt = 0 +wait_dnode3_ready_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode3_ready_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 + +if $dnode3Status != ready then + sleep 2000 + goto wait_dnode3_ready_0 +endi + +print ============== step11: stop dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s stop -x SIGINT +sleep 3000 + +$cnt = 0 +wait_dnode4_offline_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode4_offline_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +#$dnode4Status = $data4_4 + +if $loop_cnt == 0 
then + $dnode4Status = $data4_4 +elif $loop_cnt == 1 then + $dnode4Status = $data4_6 +elif $loop_cnt == 2 then + $dnode4Status = $data4_8 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $dnode4Status != offline then + sleep 2000 + goto wait_dnode4_offline_0 +endi + +print ============== step12: restart dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s start +sleep 3000 + +$cnt = 0 +wait_dnode4_ready_1: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode4_ready_1 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +#$dnode4Status = $data4_4 + +if $loop_cnt == 0 then + $dnode4Status = $data4_4 +elif $loop_cnt == 1 then + $dnode4Status = $data4_6 +elif $loop_cnt == 2 then + $dnode4Status = $data4_8 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $dnode4Status != ready then + sleep 2000 + goto wait_dnode4_ready_1 +endi + +print ============== step13: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 + +if $data04 != 2 then + print rplica is not modify to 2, error!!!!!! 
+ return -1 +endi + +print ============== step14: stop and drop dnode4, then remove data dir of dnode4 +system sh/exec.sh -n dnode4 -s stop -x SIGINT +sleep 3000 + +$cnt = 0 +wait_dnode4_offline_1: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode4_offline_1 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 + +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +#$dnode4Status = $data4_4 + +if $loop_cnt == 0 then + $dnode4Status = $data4_4 +elif $loop_cnt == 1 then + $dnode4Status = $data4_6 +elif $loop_cnt == 2 then + $dnode4Status = $data4_8 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $dnode4Status != offline then + sleep 2000 + goto wait_dnode4_offline_1 +endi + +sleep 3000 +sql drop dnode $hostname4 +system rm -rf ../../../sim/dnode4/data + + +print ============== step15: alter replica 1 +sql alter database $db replica 1 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data04 != 1 then + print rplica is not modify to 1, error!!!!!! + return -1 +endi + + +print ============== step16: alter replica 2 +sql alter database $db replica 2 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data04 != 2 then + print rplica is not modify to 2, error!!!!!! 
+ return -1 +endi + +print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready +system sh/cfg.sh -n dnode1 -c first -v $hostname2 +system sh/cfg.sh -n dnode1 -c second -v $hostname3 + +system sh/exec.sh -n dnode1 -s start +sql create dnode $hostname1 + +wait_dnode1_ready_0: +$cnt = $cnt + 1 +if $cnt == 10 then + return -1 +endi +sql show dnodes +if $rows != 3 then + sleep 2000 + goto wait_dnode1_ready_0 +endi +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +#$dnode1Status = $data4_1 +$dnode2Status = $data4_2 +$dnode3Status = $data4_3 +$dnode4Status = $data4_4 + +if $loop_cnt == 0 then + $dnode1Status = $data4_5 +elif $loop_cnt == 1 then + $dnode1Status = $data4_7 +elif $loop_cnt == 2 then + $dnode1Status = $data4_9 +else then + print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** + return +endi + +if $dnode1Status != ready then + sleep 2000 + goto wait_dnode1_ready_0 +endi + +print ============== step18: alter replica 3 +sql alter database $db replica 3 +sql show databases +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data04 != 3 then + print rplica is not modify to 3, error!!!!!! + return -1 +endi + +print **** **** **** (loop_cnt: $loop_cnt ) end, continue...... 
**** **** **** **** +$loop_cnt = $loop_cnt + 1 +goto loop_cluster_do diff --git a/tests/test/c/createTablePerformance.c b/tests/test/c/createTablePerformance.c index 4ab6f98423..3edffd2a5e 100644 --- a/tests/test/c/createTablePerformance.c +++ b/tests/test/c/createTablePerformance.c @@ -31,6 +31,7 @@ char stableName[64] = "st"; int32_t numOfThreads = 30; int32_t numOfTables = 100000; int32_t maxTables = 5000; +int32_t replica = 1; int32_t numOfColumns = 2; typedef struct { @@ -96,7 +97,7 @@ void createDbAndSTable() { exit(1); } - sprintf(qstr, "create database if not exists %s maxtables %d", dbName, maxTables); + sprintf(qstr, "create database if not exists %s maxtables %d replica %d", dbName, maxTables, replica); TAOS_RES *pSql = taos_query(con, qstr); int32_t code = taos_errno(pSql); if (code != 0) { @@ -189,6 +190,8 @@ void printHelp() { printf("%s%s%s%d\n", indent, indent, "numOfThreads, default is ", numOfThreads); printf("%s%s\n", indent, "-n"); printf("%s%s%s%d\n", indent, indent, "numOfTables, default is ", numOfTables); + printf("%s%s\n", indent, "-r"); + printf("%s%s%s%d\n", indent, indent, "replica, default is ", replica); printf("%s%s\n", indent, "-columns"); printf("%s%s%s%d\n", indent, indent, "numOfColumns, default is ", numOfColumns); printf("%s%s\n", indent, "-tables"); @@ -212,6 +215,8 @@ void shellParseArgument(int argc, char *argv[]) { numOfThreads = atoi(argv[++i]); } else if (strcmp(argv[i], "-n") == 0) { numOfTables = atoi(argv[++i]); + } else if (strcmp(argv[i], "-r") == 0) { + replica = atoi(argv[++i]); } else if (strcmp(argv[i], "-tables") == 0) { maxTables = atoi(argv[++i]); } else if (strcmp(argv[i], "-columns") == 0) { @@ -226,6 +231,7 @@ void shellParseArgument(int argc, char *argv[]) { pPrint("%s numOfTables:%d %s", GREEN, numOfTables, NC); pPrint("%s numOfThreads:%d %s", GREEN, numOfThreads, NC); pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC); + pPrint("%s replica:%d %s", GREEN, replica, NC); pPrint("%s dbPara maxTables:%d 
%s", GREEN, maxTables, NC); pPrint("%s start create table performace test %s", GREEN, NC); diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h index ae8848e1ac..ecd481f869 100644 --- a/tests/tsim/inc/sim.h +++ b/tests/tsim/inc/sim.h @@ -51,24 +51,12 @@ #define FAILED_POSTFIX "" #endif -#define simError(...) \ - if (simDebugFlag & DEBUG_ERROR) { \ - taosPrintLog("ERROR SIM ", 255, __VA_ARGS__); \ - } -#define simWarn(...) \ - if (simDebugFlag & DEBUG_WARN) { \ - taosPrintLog("WARN SIM ", simDebugFlag, __VA_ARGS__); \ - } -#define simTrace(...) \ - if (simDebugFlag & DEBUG_TRACE) { \ - taosPrintLog("SIM ", simDebugFlag, __VA_ARGS__); \ - } -#define simDump(x, y) \ - if (simDebugFlag & DEBUG_DUMP) { \ - taosDumpData(x, y); \ - } -#define simPrint(...) \ - { taosPrintLog("SIM ", 255, __VA_ARGS__); } +#define simFatal(...) { if (simDebugFlag & DEBUG_FATAL) { taosPrintLog("SIM FATAL ", 255, __VA_ARGS__); }} +#define simError(...) { if (simDebugFlag & DEBUG_ERROR) { taosPrintLog("SIM ERROR ", 255, __VA_ARGS__); }} +#define simWarn(...) { if (simDebugFlag & DEBUG_WARN) { taosPrintLog("SIM WARN ", 255, __VA_ARGS__); }} +#define simInfo(...) { if (simDebugFlag & DEBUG_INFO) { taosPrintLog("SIM INFO ", 255, __VA_ARGS__); }} +#define simDebug(...) { if (simDebugFlag & DEBUG_DEBUG) { taosPrintLog("SIM DEBUG ", simDebugFlag, __VA_ARGS__); }} +#define simTrace(...) 
{ if (simDebugFlag & DEBUG_TRACE) { taosPrintLog("SIM TRACE ", simDebugFlag, __VA_ARGS__); }} enum { SIM_SCRIPT_TYPE_MAIN, SIM_SCRIPT_TYPE_BACKGROUND }; diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 048ee04866..b077547709 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -75,7 +75,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { for (int i = 0; i < MAX_QUERY_ROW_NUM; ++i) { if (strncmp(keyName, script->data[i][0], keyLen) == 0) { - simTrace("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); + simDebug("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); return script->data[i][col]; } } @@ -90,7 +90,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { return "null"; } - simTrace("script:%s, data[%d][%d]=%s", script->fileName, row, col, script->data[row][col]); + simDebug("script:%s, data[%d][%d]=%s", script->fileName, row, col, script->data[row][col]); return script->data[row][col]; } } @@ -102,7 +102,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } if (strncmp(varName, var->varName, varLen) == 0) { // if (strlen(var->varValue) != 0) - // simTrace("script:%s, var:%s, value:%s", script->fileName, + // simDebug("script:%s, var:%s, value:%s", script->fileName, // var->varName, var->varValue); return var->varValue; } @@ -240,7 +240,7 @@ bool simExecuteRunCmd(SScript *script, char *option) { return false; } - simPrint("script:%s, start to execute", newScript->fileName); + simInfo("script:%s, start to execute", newScript->fileName); newScript->type = SIM_SCRIPT_TYPE_MAIN; simScriptPos++; @@ -262,7 +262,7 @@ bool simExecuteRunBackCmd(SScript *script, char *option) { sprintf(script->error, "lineNum:%d. 
parse file:%s error", script->lines[script->linePos].lineNum, fileName); return false; } - simPrint("script:%s, start to execute in background", newScript->fileName); + simInfo("script:%s, start to execute in background", newScript->fileName); newScript->type = SIM_SCRIPT_TYPE_BACKGROUND; script->bgScripts[script->bgScriptLen++] = newScript; @@ -336,7 +336,7 @@ bool simExecutePrintCmd(SScript *script, char *rest) { simVisuallizeOption(script, rest, buf); rest = buf; - simPrint("script:%s, %s", script->fileName, rest); + simInfo("script:%s, %s", script->fileName, rest); script->linePos++; return true; } @@ -351,9 +351,9 @@ bool simExecuteSleepCmd(SScript *script, char *option) { delta = atoi(option); if (delta <= 0) delta = 5; - simPrint("script:%s, sleep %dms begin", script->fileName, delta); + simInfo("script:%s, sleep %dms begin", script->fileName, delta); taosMsleep(delta); - simPrint("script:%s, sleep %dms finished", script->fileName, delta); + simInfo("script:%s, sleep %dms finished", script->fileName, delta); script->linePos++; return true; @@ -372,7 +372,7 @@ bool simExecuteReturnCmd(SScript *script, char *option) { sprintf(script->error, "lineNum:%d. 
error return %s", script->lines[script->linePos].lineNum, option); return false; } else { - simPrint("script:%s, return cmd execute with:%d", script->fileName, ret); + simInfo("script:%s, return cmd execute with:%d", script->fileName, ret); script->linePos = script->numOfLines; } @@ -418,7 +418,7 @@ void simCloseRestFulConnect(SScript *script) { void simCloseNativeConnect(SScript *script) { if (script->taos == NULL) return; - simTrace("script:%s, taos:%p closed", script->fileName, script->taos); + simDebug("script:%s, taos:%p closed", script->fileName, script->taos); taos_close(script->taos); taosMsleep(1200); @@ -468,7 +468,7 @@ int simParseHttpCommandResult(SScript *script, char *command) { cJSON_Delete(root); return retcode; } else { - simTrace("script:%s, json:status:%s not equal to succ, but code is %d, response:%s", script->fileName, + simDebug("script:%s, json:status:%s not equal to succ, but code is %d, response:%s", script->fileName, status->valuestring, retcode, command); cJSON_Delete(root); return 0; @@ -568,10 +568,10 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) { for (int attempt = 0; attempt < 10; ++attempt) { success = simExecuteRestFulCommand(script, command) == 0; if (!success) { - simTrace("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); taosMsleep(1000); } else { - simTrace("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); + simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); break; } } @@ -581,7 +581,7 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) { return false; } - simTrace("script:%s, connect taosd successed, auth:%p", script->fileName, script->auth); + simDebug("script:%s, connect taosd successed, auth:%p", 
script->fileName, script->auth); return true; } @@ -592,10 +592,10 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { for (int attempt = 0; attempt < 10; ++attempt) { taos = taos_connect(NULL, user, pass, NULL, tsDnodeShellPort); if (taos == NULL) { - simTrace("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); taosMsleep(1000); } else { - simTrace("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); + simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); break; } } @@ -606,7 +606,7 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { } script->taos = taos; - simTrace("script:%s, connect taosd successed, taos:%p", script->fileName, taos); + simDebug("script:%s, connect taosd successed, taos:%p", script->fileName, taos); return true; } @@ -643,11 +643,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { ret = taos_errno(pSql); if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simTrace("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simTrace("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", + simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", script->fileName, script->taos, rest, ret, tstrerror(ret), taos_errstr(pSql)); if (line->errorJump == SQL_JUMP_TRUE) { @@ -672,7 +672,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { int num_fields = taos_field_count(pSql); if (num_fields != 0) { if (pSql == NULL) { - simTrace("script:%s, taos:%p, 
%s failed, result is null", script->fileName, script->taos, rest); + simDebug("script:%s, taos:%p, %s failed, result is null", script->fileName, script->taos, rest); if (line->errorJump == SQL_JUMP_TRUE) { script->linePos = line->jump; return true; @@ -794,11 +794,11 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) { ret = simExecuteRestFulCommand(script, command); if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simTrace("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simTrace("script:%s, taos:%p, %s failed, ret:%d", + simDebug("script:%s, taos:%p, %s failed, ret:%d", script->fileName, script->taos, rest, ret); if (line->errorJump == SQL_JUMP_TRUE) { @@ -827,7 +827,7 @@ bool simExecuteSqlImpCmd(SScript *script, char *rest, bool isSlow) { simVisuallizeOption(script, rest, buf); rest = buf; - simTrace("script:%s, exec:%s", script->fileName, rest); + simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { @@ -883,7 +883,7 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { simVisuallizeOption(script, rest, buf); rest = buf; - simTrace("script:%s, exec:%s", script->fileName, rest); + simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { @@ -929,7 +929,7 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { } if (ret != TSDB_CODE_SUCCESS) { - simTrace("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", + simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", script->fileName, 
script->taos, rest, ret, tstrerror(ret)); script->linePos++; return true; diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c index f016e36d41..5b2fc87307 100644 --- a/tests/tsim/src/simMain.c +++ b/tests/tsim/src/simMain.c @@ -49,7 +49,7 @@ int main(int argc, char *argv[]) { exit(1); } - simPrint("simulator is running ..."); + simInfo("simulator is running ..."); signal(SIGINT, simHandleSignal); SScript *script = simParseScript(scriptFile); diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c index 3acfebb9bd..b50c853ea8 100644 --- a/tests/tsim/src/simSystem.c +++ b/tests/tsim/src/simSystem.c @@ -71,7 +71,7 @@ char *simParseHostName(char *varName) { } sprintf(hostName, "'%s:%d'", simHostName, port); - //simPrint("hostName:%s", hostName); + //simInfo("hostName:%s", hostName); return hostName; } @@ -102,20 +102,20 @@ void simFreeScript(SScript *script) { SScript *simProcessCallOver(SScript *script) { if (script->type == SIM_SCRIPT_TYPE_MAIN) { if (script->killed) { - simPrint("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX + simInfo("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX "failed" FAILED_POSTFIX ", error:%s", script->fileName, script->error); exit(-1); } else { - simPrint("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX + simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX "success" SUCCESS_POSTFIX, script->fileName); simCloseTaosdConnect(script); simScriptSucced++; simScriptPos--; if (simScriptPos == -1) { - simPrint("----------------------------------------------------------------------"); - simPrint("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced); + simInfo("----------------------------------------------------------------------"); + simInfo("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced); exit(0); } @@ -123,7 +123,7 @@ SScript *simProcessCallOver(SScript *script) { return 
simScriptList[simScriptPos]; } } else { - simPrint("script:%s, is stopped by main script", script->fileName); + simInfo("script:%s, is stopped by main script", script->fileName); simFreeScript(script); return NULL; } @@ -143,7 +143,7 @@ void *simExecuteScript(void *inputScript) { } else { SCmdLine *line = &script->lines[script->linePos]; char *option = script->optionBuffer + line->optionOffset; - simTrace("script:%s, line:%d with option \"%s\"", script->fileName, line->lineNum, option); + simDebug("script:%s, line:%d with option \"%s\"", script->fileName, line->lineNum, option); SCommand *cmd = &simCmdList[line->cmdno]; int ret = (*(cmd->executeCmd))(script, option);