diff --git a/.travis.yml b/.travis.yml index 2a0aa6372b..4d7a809e29 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,7 @@ branches: matrix: - os: linux - dist: bionic + dist: focal language: c git: @@ -28,8 +28,6 @@ matrix: - build-essential - cmake - net-tools - - python-pip - - python-setuptools - python3-pip - python3-setuptools - valgrind @@ -54,13 +52,19 @@ matrix: cd ${TRAVIS_BUILD_DIR}/debug make install > /dev/null || travis_terminate $? - pip install numpy - pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/ pip3 install numpy pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/ cd ${TRAVIS_BUILD_DIR}/tests ./test-all.sh smoke || travis_terminate $? + sleep 1 + + cd ${TRAVIS_BUILD_DIR}/tests/pytest + pkill -TERM -x taosd + fuser -k -n tcp 6030 + sleep 1 + ./crash_gen.sh -a -p -t 4 -s 25|| travis_terminate $? + sleep 1 cd ${TRAVIS_BUILD_DIR}/tests/pytest ./valgrind-test.sh 2>&1 > mem-error-out.log @@ -160,7 +164,7 @@ matrix: script: - cmake .. > /dev/null - - make > /dev/null + - make - os: linux dist: bionic diff --git a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md index 0be39ab8e4..343ce80422 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md @@ -480,9 +480,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 - **LEASTSQUARES** ```mysql - SELECT LEASTSQUARES(field_name) FROM tb_name [WHERE clause] + SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause] ``` - 功能说明:统计表中某列的值是主键(时间戳)的拟合直线方程。 + 功能说明:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val是自变量初始值,step_val是自变量的步长值。 返回结果数据类型:字符串表达式(斜率, 截距)。 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 说明:自变量是时间戳,因变量是该列的值。 diff --git a/documentation/webdocs/markdowndocs/TAOS SQL.md b/documentation/webdocs/markdowndocs/TAOS SQL.md index 72e41dbec4..c0d35e9afc 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL.md @@ -412,7 +412,7 @@ TDengine supports aggregations over numerical values, they are listed below: SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause] ``` Function: the value of the specified column below which `P` percent of the data points fall. - Return Data Type: the same data type. + Return Data Type: double. Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`. Applied to: table/STable. Note: The range of `P` is `[0, 100]`. When `P=0` , `PERCENTILE` returns the equal value as `MIN`; when `P=100`, `PERCENTILE` returns the equal value as `MAX`. @@ -446,7 +446,7 @@ TDengine supports aggregations over numerical values, they are listed below: SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` Function: return the difference between the maximum and the mimimum value. - Return Data Type: the same data type. + Return Data Type: double. Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`. Applied to: table/STable. 
Note: spread gives the range of data variation in a table/supertable; it is equivalent to `MAX()` - `MIN()` diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go index d714fc339c..6996047026 100644 --- a/importSampleData/app/main.go +++ b/importSampleData/app/main.go @@ -18,7 +18,7 @@ import ( "sync" "time" - _ "github.com/taosdata/TDengine/src/connector/go/taosSql" + _ "github.com/taosdata/driver-go/taosSql" ) const ( diff --git a/src/client/inc/tscLog.h b/src/client/inc/tscLog.h index c395951742..94adcfe17a 100644 --- a/src/client/inc/tscLog.h +++ b/src/client/inc/tscLog.h @@ -31,9 +31,7 @@ extern int32_t tscEmbedded; #define tscInfo(...) { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC INFO ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} #define tscDebug(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }} #define tscTrace(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC TRACE ", cDebugFlag, __VA_ARGS__); }} - -#define tscDebugDump(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }} -#define tscTraceDump(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLongString("TSC TRACE ", cDebugFlag, __VA_ARGS__); }} +#define tscDebugL(...){ if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }} #ifdef __cplusplus } diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index f687d7f244..4992692109 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -87,6 +87,16 @@ typedef struct SVgroupTableInfo { SArray* itemList; //SArray } SVgroupTableInfo; +static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { + assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < TSDB_MAX_UNION_CLAUSE); + + if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) { + return NULL; + } + + return pCmd->pQueryInfo[subClauseIndex]; +} + int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks); void tscDestroyDataBlock(STableDataBlocks* pDataBlock); diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h index 99ffa4e766..67942ad42a 100644 --- a/src/client/inc/tschemautil.h +++ b/src/client/inc/tschemautil.h @@ -110,8 +110,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size //todo tags value as well as the table id structure needs refactor char *tsGetTagsValue(STableMeta *pMeta); -void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable); - #ifdef __cplusplus } #endif diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c8754e5beb..a398ad659e 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -320,6 +320,8 @@ typedef struct SSqlStream { SSqlObj *pSql; uint32_t streamId; char listed; + bool isProject; + int16_t precision; int64_t num; // number of computing count /* @@ -334,7 +336,6 @@ typedef struct SSqlStream { int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed int64_t interval; int64_t slidingTime; - int16_t precision; void * pTimer; void (*fp)(); diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index d070fad11b..eb9b1cb479 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -29,9 +29,6 @@ #define jniDebug(...) 
{ if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }} #define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI TRACE ", jniDebugFlag, __VA_ARGS__); }} -#define jniDebugDump(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }} -#define jniTraceDump(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }} - int __init = 0; JavaVM *g_vm = NULL; diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 3fed3e4d67..41464aa660 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -55,7 +55,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const strtolower(pSql->sqlstr, sqlstr); - tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr); + tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); pSql->cmd.curSql = pSql->sqlstr; int32_t code = tsParseSql(pSql, true); @@ -471,13 +471,19 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } // in case of insert, redo parsing the sql string and build new submit data block for two reasons: - // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated + // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly. // 2. vnode may need the schema information along with submit block to update its local table schema. if (pCmd->command == TSDB_SQL_INSERT) { tscDebug("%p redo parse sql string to build submit block", pSql); pCmd->parseFinished = false; - if ((code = tsParseSql(pSql, true)) == TSDB_CODE_SUCCESS) { + code = tsParseSql(pSql, true); + + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + return; + } + + if (code == TSDB_CODE_SUCCESS) { /* * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks, * and send the required submit block according to index value in supporter to server. 
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index afdad05b43..909338aa4a 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -14,7 +14,6 @@ */ #include "os.h" -#include "qast.h" #include "qextbuffer.h" #include "qfill.h" #include "qhistogram.h" @@ -23,6 +22,7 @@ #include "qtsbuf.h" #include "taosdef.h" #include "taosmsg.h" +#include "qast.h" #include "tscLog.h" #include "tscSubquery.h" #include "tscompression.h" @@ -340,13 +340,12 @@ bool stableQueryFunctChanged(int32_t funcId) { */ void resetResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = false; } -void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable) { +void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf) { assert(pResInfo->interResultBuf == NULL); pResInfo->bufLen = size; pResInfo->superTableQ = superTable; - - pResInfo->interResultBuf = calloc(1, (size_t)size); + pResInfo->interResultBuf = buf; } // set the query flag to denote that query is completed diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 1d66fb0467..83700ce0a5 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -406,7 +406,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { pSql->res.qhandle = 0x1; pSql->res.numOfRows = 0; } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { - taosCacheEmpty(tscCacheHandle,false); + taosCacheEmpty(tscCacheHandle); } else if (pCmd->command == TSDB_SQL_SERV_VERSION) { tscProcessServerVer(pSql); } else if (pCmd->command == TSDB_SQL_CLI_VERSION) { diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 8919d872a6..80fc82d90b 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -274,6 +274,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pReducer->numOfBuffer = idx; SCompareParam *param = malloc(sizeof(SCompareParam)); + if (param == NULL) { + tfree(pReducer); + return; + } param->pLocalData = pReducer->pLocalDataSrc; param->pDesc = pReducer->pDesc; param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage; @@ -284,6 +288,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); if (pReducer->pLoserTree == NULL || pRes->code != 0) { + tfree(param); tfree(pReducer); return; } @@ -332,6 +337,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd tfree(pReducer->pResultBuf); tfree(pReducer->pFinalRes); tfree(pReducer->prevRowOfInput); + tfree(pReducer->pLoserTree); + tfree(param); tfree(pReducer); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; @@ -364,7 +371,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd TSKEY stime = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey); int64_t revisedSTime = - taosGetIntervalStartTimestamp(stime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision); + taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision); if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); @@ -831,7 +838,7 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo if (pFillInfo != NULL) { int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? 
pQueryInfo->window.skey : pQueryInfo->window.ekey; int64_t revisedSTime = - taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision); + taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision); taosResetFillInfo(pFillInfo, revisedSTime); } @@ -1301,9 +1308,7 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer if (pQueryInfo->fillType != TSDB_FILL_NONE) { TSKEY skey = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey); int64_t newTime = - taosGetIntervalStartTimestamp(skey, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision); -// taosResetFillInfo(pLocalReducer->pFillInfo, pQueryInfo->order.order, newTime, -// pQueryInfo->groupbyExpr.numOfGroupCols, 4096, 0, NULL, pLocalReducer->rowSize); + taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision); taosResetFillInfo(pLocalReducer->pFillInfo, newTime); } } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 4400ca2c25..95098bbab1 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1310,6 +1310,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) { tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql); } + ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); + if (TSDB_CODE_SUCCESS != ret) { + return ret; + } + if (tscIsInsertData(pSql->sqlstr)) { /* * Set the fp before parse the sql string, in case of getTableMeta failed, in which @@ -1326,11 +1331,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) { ret = tsParseInsertSql(pSql); } else { - ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); - if (TSDB_CODE_SUCCESS != ret) { - return ret; - } - SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr); ret = tscToSQLCmd(pSql, &SQLInfo); SQLInfoDestroy(&SQLInfo); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 22b6be1c57..9f0d1a26ab 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -538,7 +538,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pRes->numOfRows = 1; strtolower(pSql->sqlstr, sql); - tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr); + tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); if (tscIsInsertData(pSql->sqlstr)) { pStmt->isInsert = true; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 2b325afa7c..65e2c976e0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -18,19 +18,19 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "qast.h" #include "taos.h" #include "taosmsg.h" -#include "tstoken.h" -#include "tstrbuild.h" -#include "ttime.h" +#include "qast.h" +#include "tcompare.h" +#include "tname.h" #include "tscLog.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" +#include "tstoken.h" +#include "tstrbuild.h" +#include "ttime.h" #include "ttokendef.h" -#include "tname.h" -#include "tcompare.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -90,6 +90,7 @@ static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryI static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type); +static 
int32_t validateEp(char* ep); static int32_t validateDNodeConfig(tDCLSQL* pOptions); static int32_t validateLocalConfig(tDCLSQL* pOptions); static int32_t validateColumnName(char* name); @@ -359,6 +360,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_CFG_DNODE: { const char* msg2 = "invalid configure options or values"; + const char* msg3 = "invalid dnode ep"; /* validate the ip address */ tDCLSQL* pDCL = pInfo->pDCLInfo; @@ -375,6 +377,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n); + if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n); if (pDCL->nTokens == 3) { @@ -654,11 +660,14 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) { const char* msg0 = "sliding value too small"; const char* msg1 = "sliding value no larger than the interval value"; + const char* msg2 = "sliding value can not less than 1% of interval value"; + + const static int32_t INTERVAL_SLIDING_FACTOR = 100; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SSQLToken* pSliding = &pQuerySql->sliding; STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); + SSQLToken* pSliding = &pQuerySql->sliding; if (pSliding->n != 0) { getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime); if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) { @@ -676,6 +685,10 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu pQueryInfo->slidingTime = pQueryInfo->intervalTime; } + if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + return TSDB_CODE_SUCCESS; } @@ -4487,10 +4500,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload; pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId); - pUpdateMsg->tid = htonl(pTableMeta->sid); - pUpdateMsg->uid = htobe64(pTableMeta->uid); - pUpdateMsg->colId = htons(pTagsSchema->colId); - pUpdateMsg->tversion = htons(pTableMeta->tversion); + pUpdateMsg->tid = htonl(pTableMeta->sid); + pUpdateMsg->uid = htobe64(pTableMeta->uid); + pUpdateMsg->colId = htons(pTagsSchema->colId); + pUpdateMsg->type = pTagsSchema->type; + pUpdateMsg->bytes = htons(pTagsSchema->bytes); + pUpdateMsg->tversion = htons(pTableMeta->tversion); pUpdateMsg->numOfTags = htons(numOfTags); pUpdateMsg->schemaLen = htonl(schemaLen); @@ -4627,6 +4642,24 @@ typedef struct SDNodeDynConfOption { int32_t len; // name string length } SDNodeDynConfOption; + +int32_t validateEp(char* ep) { + char buf[TSDB_EP_LEN + 1] = {0}; + tstrncpy(buf, ep, TSDB_EP_LEN); + + char *pos = strchr(buf, ':'); + if (NULL == pos) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + uint16_t port = atoi(pos+1); + if (0 == port) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + int32_t validateDNodeConfig(tDCLSQL* pOptions) { if (pOptions->nTokens < 2 || pOptions->nTokens > 3) { return TSDB_CODE_TSC_INVALID_SQL; @@ -6094,16 +6127,12 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS } } - + // NOTE: binary|nchar data allows the >|< type filter if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && 
(*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) { if (pRight->nodeType == TSQL_NODE_VALUE) { if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL) { return TSDB_CODE_TSC_INVALID_SQL; } - if ((pRight->pVal->nType == TSDB_DATA_TYPE_BINARY || pRight->pVal->nType == TSDB_DATA_TYPE_NCHAR) - && (*pExpr)->_node.optr != TSDB_RELATION_LIKE) { - return TSDB_CODE_TSC_INVALID_SQL; - } } } } diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index da06e3e5e2..934a562387 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -215,25 +215,3 @@ __attribute__ ((unused)) static FORCE_INLINE size_t copy(char* dst, const char* return len; } -/* - * tablePrefix.columnName - * extract table name and save it in pTable, with only column name in pToken - */ -void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) { - const char sep = TS_PATH_DELIMITER[0]; - - if (pToken == pTable || pToken == NULL || pTable == NULL) { - return; - } - - char* r = strnchr(pToken->z, sep, pToken->n, false); - - if (r != NULL) { // record the table name token - pTable->n = r - pToken->z; - pTable->z = pToken->z; - - r += 1; - pToken->n -= (r - pToken->z); - pToken->z = r; - } -} diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d73983e77c..88fcc3828e 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -247,7 +247,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) { } else { STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); if (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || - rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) { if (pCmd->command == TSDB_SQL_CONNECT) { rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; rpcFreeCont(rpcMsg->pCont); @@ -260,7 +260,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) { // get table meta query will not retry, do nothing } else { tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry); - + + // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema + if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) { + pSql->cmd.submitSchema = 1; + } + pSql->res.code = rpcMsg->code; // keep the previous error code if (pSql->retry > pSql->maxRetry) { tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry); @@ -433,8 +438,9 @@ void tscKillSTableQuery(SSqlObj *pSql) { * here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause * sub-queries not correctly released and master sql object of super table query reaches an abnormal state. 
*/ - pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; - rpcCancelRequest(pSql->pSubs[i]->pRpcCtx); + rpcCancelRequest(pSub->pRpcCtx); + pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; + tscQueueAsyncRes(pSub); } /* @@ -1950,7 +1956,7 @@ int tscProcessUseDbRsp(SSqlObj *pSql) { } int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { - taosCacheEmpty(tscCacheHandle, false); + taosCacheEmpty(tscCacheHandle); return 0; } @@ -1996,7 +2002,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { if (isSuperTable) { // if it is a super table, reset whole query cache tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name); - taosCacheEmpty(tscCacheHandle, false); + taosCacheEmpty(tscCacheHandle); } } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8fbc1f0109..f9f93b3f89 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -617,19 +617,18 @@ void taos_stop_query(TAOS_RES *res) { if (pSql->signature != pSql) return; tscDebug("%p start to cancel query", res); - pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { tscKillSTableQuery(pSql); - return; } - if (pSql->cmd.command >= TSDB_SQL_LOCAL) { - return; + if (pSql->cmd.command < TSDB_SQL_LOCAL) { + rpcCancelRequest(pSql->pRpcCtx); } + pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; + tscQueueAsyncRes(pSql); - rpcCancelRequest(pSql->pRpcCtx); tscDebug("%p query is cancelled", res); } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index a6d111dbc7..6cc27a4cfe 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -71,6 +71,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { pSql->fp = tscProcessStreamQueryCallback; pSql->param = pStream; + pSql->res.completed = false; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { // failed to get meter/metric meta, retry in 10sec. if (code != TSDB_CODE_SUCCESS) { int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); + tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); tscSetRetryTimer(pStream, pSql, retryDelayTime); } else { @@ -108,7 +109,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); tscDebug("%p add into timer", pSql); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { /* * pQueryInfo->window.ekey, which is the start time, does not change in case of * repeat first execution, once the first execution failed. 
@@ -121,7 +122,19 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { } } else { pQueryInfo->window.skey = pStream->stime - pStream->interval; - pQueryInfo->window.ekey = pStream->stime - 1; + int64_t etime = taosGetTimestamp(pStream->precision); + // delay to wait all data in last time window + if (pStream->precision == TSDB_TIME_PRECISION_MICRO) { + etime -= tsMaxStreamComputDelay * 1000l; + } else { + etime -= tsMaxStreamComputDelay; + } + if (etime > pStream->etime) { + etime = pStream->etime; + } else { + etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval; + } + pQueryInfo->window.ekey = etime; } // launch stream computing in a new thread @@ -137,7 +150,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf SSqlStream *pStream = (SSqlStream *)param; if (tres == NULL || numOfRows < 0) { int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, + tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows, retryDelay); STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0); @@ -151,17 +164,45 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param); } -static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) { - SSqlRes *pRes = &pSql->res; - - int64_t timestamp = *(int64_t *)pRes->data; - int64_t actualTimestamp = pStream->stime - pStream->interval; - - if (timestamp != actualTimestamp) { - // reset the timestamp of each agg point by using start time of each interval - *((int64_t *)pRes->data) = actualTimestamp; - tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64, pSql, pStream, timestamp, actualTimestamp); +// no need to be called as this is alreay done in the query +static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) { +#if 0 + SSqlObj * pSql = pStream->pSql; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (pQueryInfo->fillType != TSDB_FILL_SET_VALUE && pQueryInfo->fillType != TSDB_FILL_NULL) { + return; } + + SSqlRes *pRes = &pSql->res; + /* failed to retrieve any result in this retrieve */ + pSql->res.numOfRows = 1; + void *row[TSDB_MAX_COLUMNS] = {0}; + char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0}; + void *oldPtr = pSql->res.data; + pSql->res.data = tmpRes; + int32_t rowNum = 0; + + while (pStream->stime + pStream->slidingTime < ts) { + pStream->stime += pStream->slidingTime; + *(TSKEY*)row[0] = pStream->stime; + for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); + TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type); + row[i] = pSql->res.data + offset; + } + (*pStream->fp)(pStream->param, pSql, row); + ++rowNum; + } + + if (rowNum > 0) { + tscDebug("%p stream:%p %d rows padded", pSql, pStream, rowNum); + } + + pRes->numOfRows = 0; + pRes->data = oldPtr; +#endif } static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) { @@ -170,7 +211,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (pSql == NULL || numOfRows < 0) { int64_t retryDelayTime = 
tscGetRetryDelayTime(pStream->slidingTime, pStream->precision); - tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); + tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime); tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); return; @@ -180,16 +221,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful. pStream->numOfRes += numOfRows; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - for(int32_t i = 0; i < numOfRows; ++i) { TAOS_ROW row = taos_fetch_row(res); tscDebug("%p stream:%p fetch result", pSql, pStream); - if (isProjectStream(pQueryInfo)) { - pStream->stime = *(TSKEY *)row[0]; - } else { - tscSetTimestampForRes(pStream, pSql); - } + tscStreamFillTimeGap(pStream, *(TSKEY*)row[0]); + pStream->stime = *(TSKEY *)row[0]; // user callback function (*pStream->fp)(pStream->param, res, row); @@ -199,55 +235,18 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream); } else { // numOfRows == 0, all data has been retrieved pStream->useconds += pSql->res.useconds; - - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (pStream->numOfRes == 0) { - if (pQueryInfo->fillType == TSDB_FILL_SET_VALUE || pQueryInfo->fillType == TSDB_FILL_NULL) { - SSqlRes *pRes = &pSql->res; - - /* failed to retrieve any result in this retrieve */ - pSql->res.numOfRows = 1; - void *row[TSDB_MAX_COLUMNS] = {0}; - char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0}; - - void *oldPtr = pSql->res.data; - pSql->res.data = tmpRes; - - for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - - assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->fillVal[i]), pField->bytes, pField->type); - row[i] = pSql->res.data + offset; - } - - tscSetTimestampForRes(pStream, pSql); - row[0] = pRes->data; - - // char result[512] = {0}; - // taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutput); - // tscInfo("%p stream:%p query result: %s", pSql, pStream, result); - tscDebug("%p stream:%p fetch result", pSql, pStream); - - // user callback function - (*pStream->fp)(pStream->param, res, row); - - pRes->numOfRows = 0; - pRes->data = oldPtr; - } else if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { /* no resuls in the query range, retry */ // todo set retry dynamic time int32_t retry = tsProjectExecInterval; - tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry); + tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry); tscSetRetryTimer(pStream, pStream->pSql, retry); return; } - } else { - if (isProjectStream(pQueryInfo)) { - pStream->stime += 1; - } + } else if (pStream->isProject) { + pStream->stime += 1; } tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name, @@ -256,16 +255,18 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf // release the metric/meter meta information reference, so data in cache can be updated 
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false); + tscFreeSqlResult(pSql); + tfree(pSql->pSubs); + pSql->numOfSubs = 0; tfree(pTableMetaInfo->vgroupList); tscSetNextLaunchTimer(pStream, pSql); } } static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) { - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { int64_t now = taosGetTimestamp(pStream->precision); int64_t etime = now > pStream->etime ? pStream->etime : now; @@ -323,8 +324,7 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) { static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { int64_t timer = 0; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { /* * for project query, no mater fetch data successfully or not, next launch will issue * more than the sliding time window @@ -342,7 +342,6 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { return; } } else { - pStream->stime += pStream->slidingTime; if ((pStream->stime - pStream->interval) >= pStream->etime) { tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); @@ -409,14 +408,16 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { pStream->slidingTime = pQueryInfo->slidingTime; - pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor - pQueryInfo->slidingTime = 0; + if (pStream->isProject) { + pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor + pQueryInfo->slidingTime = 0; + } } static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (isProjectStream(pQueryInfo)) { + if (pStream->isProject) { // no data in table, flush all data till now to destination meter, 10sec delay pStream->interval = tsProjectExecInterval; pStream->slidingTime = tsProjectExecInterval; @@ -489,7 +490,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream)); if (pStream == NULL) { - tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code); + tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code); tscFreeSqlObj(pSql); return NULL; } @@ -503,7 +504,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p } strtolower(pSql->sqlstr, sqlstr); - tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr); + tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); tsem_init(&pSql->rspSem, 0, 0); int32_t code = tsParseSql(pSql, true); @@ -514,7 +515,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p if (pRes->code != TSDB_CODE_SUCCESS) { setErrorInfo(pSql, pRes->code, pCmd->payload); - tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code); + tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code); tscFreeSqlObj(pSql); return NULL; } @@ -523,6 +524,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, 
void (*fp)(void *p STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); + pStream->isProject = isProjectStream(pQueryInfo); pStream->fp = fp; pStream->callback = callback; pStream->param = param; @@ -565,6 +567,8 @@ void taos_close_stream(TAOS_STREAM *handle) { taosTmrStopA(&(pStream->pTimer)); tscDebug("%p stream:%p is closed", pSql, pStream); + // notify CQ to release the pStream object + pStream->fp(pStream->param, NULL, NULL); tscFreeSqlObj(pSql); pStream->pSql = NULL; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 0243d115f0..d3f298c2b2 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -14,12 +14,12 @@ */ #include "os.h" -#include "tscSubquery.h" +#include "qtsbuf.h" #include "qast.h" #include "tcompare.h" -#include "tschemautil.h" -#include "qtsbuf.h" #include "tscLog.h" +#include "tscSubquery.h" +#include "tschemautil.h" #include "tsclient.h" typedef struct SInsertSupporter { @@ -57,10 +57,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ pSubQueryInfo1->tsBuf = output1; pSubQueryInfo2->tsBuf = output2; + // no result generated, return directly + if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) { + tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); + return 0; + } + tsBufResetPos(pSupporter1->pTSBuf); tsBufResetPos(pSupporter2->pTSBuf); - // TODO add more details information if (!tsBufNextPos(pSupporter1->pTSBuf)) { tsBufFlush(output1); tsBufFlush(output2); @@ -210,6 +215,7 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { pSupporter->f = NULL; } + tfree(pSupporter->pIdTagList); tscTagCondRelease(&pSupporter->tagCond); free(pSupporter); } @@ -420,43 +426,6 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) { pQueryInfo->window = *win; } -static UNUSED_FUNC void tSIntersectionAndLaunchSecQuery(SJoinSupporter* pSupporter, SSqlObj* pSql) { - SSqlObj* pParentSql = pSupporter->pObj; - SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); - -// if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) { -// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); -// assert(pQueryInfo->numOfTables == 1); -// -// // for projection query, need to try next vnode -//// int32_t totalVnode = pTableMetaInfo->pMetricMeta->numOfVnodes; -// int32_t totalVnode = 0; -// if ((++pTableMetaInfo->vgroupIndex) < totalVnode) { -// tscDebug("%p current vnode:%d exhausted, try next:%d. total vnode:%d. 
current numOfRes:%d", pSql, -// pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVnode, pRes->numOfTotal); -// -// pSql->cmd.command = TSDB_SQL_SELECT; -// pSql->fp = tscJoinQueryCallback; -// tscProcessSql(pSql); -// -// return; -// } -// } - - SJoinSupporter* p1 = pParentSql->pSubs[0]->param; - SJoinSupporter* p2 = pParentSql->pSubs[1]->param; - - STimeWindow win = TSWINDOW_INITIALIZER; - int64_t num = doTSBlockIntersect(pParentSql, p1, p2, &win); - if (num <= 0) { // no result during ts intersect - tscDebug("%p free all sub SqlObj and quit", pParentSql); - freeJoinSubqueryObj(pParentSql); - } else { - updateQueryTimeRange(pParentQueryInfo, &win); - tscLaunchRealSubqueries(pParentSql); - } -} - int32_t tscCompareTidTags(const void* p1, const void* p2) { const STidTags* t1 = (const STidTags*) varDataVal(p1); const STidTags* t2 = (const STidTags*) varDataVal(p2); @@ -713,9 +682,12 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow SArray *s1 = NULL, *s2 = NULL; getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2); if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return. - tscDebug("%p free all sub SqlObj and quit", pParentSql); + tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); freeJoinSubqueryObj(pParentSql); + // set no result command + pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + (*pParentSql->fp)(pParentSql->param, pParentSql, 0); } else { // proceed to for ts_comp query SSqlCmd* pSubCmd1 = &pParentSql->pSubs[0]->cmd; @@ -846,7 +818,10 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow if (num <= 0) { // no result during ts intersect tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql); freeJoinSubqueryObj(pParentSql); - + + // set no result command + pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + (*pParentSql->fp)(pParentSql->param, pParentSql, 0); return; } @@ -1472,9 +1447,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) { tscDebug("%p start to free subquery result", pSql); - if (pSql->res.code == TSDB_CODE_SUCCESS) { - taos_free_result(pSql); - } + taos_free_result(pSql); tfree(trsupport->localBuffer); @@ -1805,6 +1778,7 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu pSql->pSubs[trsupport->subqueryIndex] = pNew; } + printf("------------alloc:%p\n", pNew); return pNew; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9b6eff7123..a58423bbaa 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -14,21 +14,21 @@ */ #include "os.h" -#include "qast.h" +#include "hash.h" +#include "tscUtil.h" #include "taosmsg.h" +#include "qast.h" #include "tcache.h" #include "tkey.h" #include "tmd5.h" -#include "tscProfile.h" #include "tscLocalMerge.h" +#include "tscLog.h" +#include "tscProfile.h" #include "tscSubquery.h" #include "tschemautil.h" #include "tsclient.h" #include "ttimer.h" #include "ttokendef.h" -#include "tscLog.h" -#include "tscUtil.h" -#include "hash.h" static void freeQueryInfoImpl(SQueryInfo* pQueryInfo); static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache); @@ -579,9 +579,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta); 
for(int32_t j = 0; j < numOfCols; ++j) { STColumn* pCol = (STColumn*) pDataBlock; - pCol->colId = pSchema[j].colId; + pCol->colId = htons(pSchema[j].colId); pCol->type = pSchema[j].type; - pCol->bytes = pSchema[j].bytes; + pCol->bytes = htons(pSchema[j].bytes); pCol->offset = 0; pDataBlock += sizeof(STColumn); @@ -663,7 +663,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { } SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; - int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize; + int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { while (dataBuf->nAllocSize < destSize) { @@ -691,7 +691,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId, pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); - int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize); + int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); pBlocks->uid = htobe64(pBlocks->uid); @@ -1464,16 +1464,6 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) { return pQueryInfo->pTableMetaInfo[tableIndex]; } -SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { - assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < TSDB_MAX_UNION_CLAUSE); - - if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) { - return NULL; - } - - return pCmd->pQueryInfo[subClauseIndex]; -} - int32_t tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo) { int32_t ret = TSDB_CODE_SUCCESS; @@ -1832,7 +1822,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pPrevInfo->vgroupList = NULL; pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList); } @@ -2097,7 +2086,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) { } void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { - SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);//tscFieldInfoGetSupp(pFieldInfo, columnIndex); + SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex); assert(pInfo->pSqlExpr != NULL); int32_t type = pInfo->pSqlExpr->resType; @@ -2112,7 +2101,7 @@ void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t column if (isNull(pData, type)) { pRes->tsrow[columnIndex] = NULL; } else { - pRes->tsrow[columnIndex] = pData + VARSTR_HEADER_SIZE; + pRes->tsrow[columnIndex] = ((tstr*)pData)->data; } if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 10d725db32..2263a5dae1 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -3,6 +3,7 @@ #include "os.h" #include "taosmsg.h" +#include "tstoken.h" typedef struct SDataStatis { int16_t colId; @@ -23,10 +24,14 @@ void extractTableName(const char 
*tableId, char *name); char* extractDBName(const char *tableId, char *name); +void extractTableNameFromToken(SSQLToken *pToken, SSQLToken* pTable); + SSchema tGetTableNameColumnSchema(); bool tscValidateTableNameLength(size_t len); SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters); +int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision); + #endif // TDENGINE_NAME_H diff --git a/src/common/inc/tulog.h b/src/common/inc/tulog.h index 63c838be69..6365b21ef9 100644 --- a/src/common/inc/tulog.h +++ b/src/common/inc/tulog.h @@ -32,9 +32,6 @@ extern int32_t tscEmbedded; #define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }} #define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL TRACE ", uDebugFlag, __VA_ARGS__); }} -#define uDebugDump(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }} -#define uTraceDump(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLongString("UTL TRACE ", uDebugFlag, __VA_ARGS__); }} - #define pError(...) { taosPrintLog("APP ERROR ", 255, __VA_ARGS__); } #define pPrint(...) { taosPrintLog("APP INFO ", 255, __VA_ARGS__); } diff --git a/src/query/inc/tvariant.h b/src/common/inc/tvariant.h similarity index 100% rename from src/query/inc/tvariant.h rename to src/common/inc/tvariant.h diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 67c104878a..fae771e855 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -129,7 +129,7 @@ int32_t tsMnodeEqualVnodeNum = 4; int32_t tsEnableHttpModule = 1; int32_t tsRestRowLimit = 10240; uint16_t tsHttpPort = 6020; // only tcp, range tcp[6020] -int32_t tsHttpCacheSessions = 100; +int32_t tsHttpCacheSessions = 1000; int32_t tsHttpSessionExpire = 36000; int32_t tsHttpMaxThreads = 2; int32_t tsHttpEnableCompress = 0; @@ -1210,7 +1210,7 @@ void taosInitGlobalCfg() { } bool taosCheckGlobalCfg() { - if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG) { + if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { taosSetAllDebugFlag(); } diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 295015d466..8b85ecfbc7 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -75,3 +75,56 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO return pFilter; } + +int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) { + if (slidingTime == 0) { + return startTime; + } + + int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime; + if (!(timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) { + /* + * here we revised the start time of day according to the local time zone, + * but in case of DST, the start time of one day need to be dynamically decided. + */ + // todo refactor to extract function that is available for Linux/Windows/Mac platform +#if defined(WINDOWS) && _MSC_VER >= 1900 + // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019 + int64_t timezone = _timezone; + int32_t daylight = _daylight; + char** tzname = _tzname; +#endif + + int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? 
MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L; + start += timezone * t; + } + + int64_t end = start + intervalTime - 1; + if (end < startTime) { + start += slidingTime; + } + return start; +} + +/* + * tablePrefix.columnName + * extract table name and save it in pTable, with only column name in pToken + */ +void extractTableNameFromToken(SSQLToken* pToken, SSQLToken* pTable) { + const char sep = TS_PATH_DELIMITER[0]; + + if (pToken == pTable || pToken == NULL || pTable == NULL) { + return; + } + + char* r = strnchr(pToken->z, sep, pToken->n, false); + + if (r != NULL) { // record the table name token + pTable->n = r - pToken->z; + pTable->z = pToken->z; + + r += 1; + pToken->n -= (r - pToken->z); + pToken->z = r; + } +} diff --git a/src/query/src/tvariant.c b/src/common/src/tvariant.c similarity index 100% rename from src/query/src/tvariant.c rename to src/common/src/tvariant.c diff --git a/src/connector/jdbc/buildTDengine.sh b/src/connector/jdbc/buildTDengine.sh deleted file mode 100755 index cf98215c85..0000000000 --- a/src/connector/jdbc/buildTDengine.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -ulimit -c unlimited - -function buildTDengine { - cd /root/TDengine - - git remote update - REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` - LOCAL_COMMIT=`git rev-parse --short @` - - echo " LOCAL: $LOCAL_COMMIT" - echo "REMOTE: $REMOTE_COMMIT" - if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then - echo "repo up-to-date" - else - echo "repo need to pull" - git pull - - LOCAL_COMMIT=`git rev-parse --short @` - cd /root/TDengine/debug - rm -rf /root/TDengine/debug/* - cmake .. - make > /dev/null - make install - fi -} - -function restartTaosd { - systemctl stop taosd - pkill -KILL -x taosd - sleep 10 - - logDir=`grep 'logDir' /etc/taos/taos.cfg|awk 'END{print $2}'` - dataDir=`grep 'dataDir' /etc/taos/taos.cfg|awk '{print $2}'` - - rm -rf $logDir/* - rm -rf $dataDir/* - - taosd 2>&1 > /dev/null & - sleep 10 -} - -buildTDengine -restartTaosd diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java new file mode 100644 index 0000000000..48290d3e62 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java @@ -0,0 +1,236 @@ +package com.taosdata.jdbc.utils; + +import java.io.File; +import java.util.*; +import java.util.concurrent.TimeUnit; + +public class TDNode { + + private int index; + private int running; + private int deployed; + private boolean testCluster; + private String path; + private String cfgDir; + private String dataDir; + private String logDir; + private String cfgPath; + + public TDNode(int index) { + this.index = index; + running = 0; + deployed = 0; + testCluster = false; + } + + public void setPath(String path) { + this.path = path; + } + + public void setTestCluster(boolean testCluster) { + this.testCluster = testCluster; + } + + public void searchTaosd(File dir, ArrayList taosdPath) { + File[] fileList = dir.listFiles(); + + if(fileList == null || fileList.length == 0) { + return; + } + + for(File file : fileList) { + if(file.isFile()) { + if(file.getName().equals("taosd")) { + taosdPath.add(file.getAbsolutePath()); + } + } else { + searchTaosd(file, taosdPath); + } + } + } + + public void start() { + String selfPath = System.getProperty("user.dir"); + String binPath = ""; + String projDir = selfPath + "/../../../../"; + + try { + ArrayList taosdPath = new ArrayList<>(); + + File dir = new File(projDir); + String realProjDir = 
dir.getCanonicalPath(); + dir = new File(realProjDir); + System.out.println("project Dir: " + projDir); + searchTaosd(dir, taosdPath); + + if(taosdPath.size() == 0) { + System.out.println("The project path doens't exist"); + return; + } else { + for(String p : taosdPath) { + if(!p.contains("packing")) { + binPath = p; + } + } + } + } catch (Exception e) { + e.printStackTrace(); + } + + if(binPath.isEmpty()) { + System.out.println("taosd not found"); + return; + } else { + System.out.println("taosd found in " + binPath); + } + + if(this.deployed == 0) { + System.out.println("dnode" + index + "is not deployed"); + return; + } + + String cmd = "nohup " + binPath + " -c " + cfgDir + " > /dev/null 2>&1 & "; + System.out.println("start taosd cmd: " + cmd); + + try{ + Runtime.getRuntime().exec(cmd); + TimeUnit.SECONDS.sleep(5); + } catch (Exception e) { + e.printStackTrace(); + } + + this.running = 1; + } + + public void stop() { + String toBeKilled = "taosd"; + + if (this.running != 0) { + String killCmd = "pkill -kill -x " + toBeKilled; + String[] killCmds = {"sh", "-c", killCmd}; + try { + Runtime.getRuntime().exec(killCmds).waitFor(); + + for(int port = 6030; port < 6041; port ++) { + String fuserCmd = "fuser -k -n tcp " + port; + Runtime.getRuntime().exec(fuserCmd).waitFor(); + } + } catch (Exception e) { + e.printStackTrace(); + } + + this.running = 0; + System.out.println("dnode:" + this.index + " is stopped by pkill"); + } + } + + public void startIP() { + try{ + String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " up"; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void stopIP() { + try{ + String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " down"; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void setCfgConfig(String option, String value) { + try{ + String cmd = "echo " + option + " " + value + " >> " + this.cfgPath; + String[] cmdLine = {"sh", "-c", cmd}; + Process ps = Runtime.getRuntime().exec(cmdLine); + ps.waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public String getDnodeRootDir() { + String dnodeRootDir = this.path + "/sim/psim/dnode" + this.index; + return dnodeRootDir; + } + + public String getDnodesRootDir() { + String dnodesRootDir = this.path + "/sim/psim" + this.index; + return dnodesRootDir; + } + + public void deploy() { + this.logDir = this.path + "/sim/dnode" + this.index + "/log"; + this.dataDir = this.path + "/sim/dnode" + this.index + "/data"; + this.cfgDir = this.path + "/sim/dnode" + this.index + "/cfg"; + this.cfgPath = this.path + "/sim/dnode" + this.index + "/cfg/taos.cfg"; + + try { + String cmd = "rm -rf " + this.logDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "rm -rf " + this.cfgDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "rm -rf " + this.dataDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.logDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.cfgDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.dataDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "touch " + this.cfgPath; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + + if(this.testCluster) { + startIP(); + setCfgConfig("masterIp", "192.168.0.1"); + setCfgConfig("secondIp", "192.168.0.2"); + setCfgConfig("publicIp", "192.168.0." 
+ this.index); + setCfgConfig("internalIp", "192.168.0." + this.index); + setCfgConfig("privateIp", "192.168.0." + this.index); + } + setCfgConfig("dataDir", this.dataDir); + setCfgConfig("logDir", this.logDir); + setCfgConfig("numOfLogLines", "1000000/00"); + setCfgConfig("mnodeEqualVnodeNum", "0"); + setCfgConfig("walLevel", "1"); + setCfgConfig("statusInterval", "1"); + setCfgConfig("numOfTotalVnodes", "64"); + setCfgConfig("numOfMnodes", "3"); + setCfgConfig("numOfThreadsPerCore", "2.0"); + setCfgConfig("monitor", "0"); + setCfgConfig("maxVnodeConnections", "30000"); + setCfgConfig("maxMgmtConnections", "30000"); + setCfgConfig("maxMeterConnections", "30000"); + setCfgConfig("maxShellConns", "30000"); + setCfgConfig("locale", "en_US.UTF-8"); + setCfgConfig("charset", "UTF-8"); + setCfgConfig("asyncLog", "0"); + setCfgConfig("anyIp", "0"); + setCfgConfig("dDebugFlag", "135"); + setCfgConfig("mDebugFlag", "135"); + setCfgConfig("sdbDebugFlag", "135"); + setCfgConfig("rpcDebugFlag", "135"); + setCfgConfig("tmrDebugFlag", "131"); + setCfgConfig("cDebugFlag", "135"); + setCfgConfig("httpDebugFlag", "135"); + setCfgConfig("monitorDebugFlag", "135"); + setCfgConfig("udebugFlag", "135"); + setCfgConfig("jnidebugFlag", "135"); + setCfgConfig("qdebugFlag", "135"); + this.deployed = 1; + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java new file mode 100644 index 0000000000..ea15ae9863 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java @@ -0,0 +1,94 @@ +package com.taosdata.jdbc.utils; + +import java.io.File; +import java.util.*; + +public class TDNodes { + private ArrayList tdNodes; + private boolean testCluster; + + public TDNodes () { + tdNodes = new ArrayList<>(); + for(int i = 1; i < 11; i ++) { + tdNodes.add(new TDNode(i)); + } + } + + public void setPath(String path) { + try { + String killCmd = "pkill -kill -x taosd"; + String[] killCmds = {"sh", "-c", killCmd}; + Runtime.getRuntime().exec(killCmds).waitFor(); + + String binPath = System.getProperty("user.dir"); + binPath += "/../../../debug"; + System.out.println("binPath: " + binPath); + + File file = new File(path); + binPath = file.getCanonicalPath(); + System.out.println("binPath real path: " + binPath); + + if(path.isEmpty()){ + file = new File(path + "/../../"); + path = file.getCanonicalPath(); + } + + for(int i = 0; i < tdNodes.size(); i++) { + tdNodes.get(i).setPath(path); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void setTestCluster(boolean testCluster) { + this.testCluster = testCluster; + } + + public void check(int index) { + if(index < 1 || index > 10) { + System.out.println("index: " + index + " should on a scale of [1, 10]"); + return; + } + } + + public void deploy(int index) { + try { + File file = new File(System.getProperty("user.dir") + "/../../../"); + String projectRealPath = file.getCanonicalPath(); + check(index); + tdNodes.get(index - 1).setTestCluster(this.testCluster); + tdNodes.get(index - 1).setPath(projectRealPath); + tdNodes.get(index - 1).deploy(); + } catch (Exception e) { + e.printStackTrace(); + System.out.println("deploy Test Exception"); + } + } + + public void cfg(int index, String option, String value) { + check(index); + tdNodes.get(index - 1).setCfgConfig(option, value); + } + + public void start(int index) { + check(index); + tdNodes.get(index - 1).start(); + } + + public void stop(int index) 
{ + check(index); + tdNodes.get(index - 1).stop(); + } + + public void startIP(int index) { + check(index); + tdNodes.get(index - 1).startIP(); + } + + public void stopIP(int index) { + check(index); + tdNodes.get(index - 1).stopIP(); + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index fd9ab49c49..5f105fb782 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -1,27 +1,38 @@ package com.taosdata.jdbc; -import java.io.BufferedReader; -import java.io.InputStreamReader; +import java.io.File; +import com.taosdata.jdbc.utils.TDNodes; +import org.junit.AfterClass; import org.junit.BeforeClass; public class BaseTest { + + private static boolean testCluster = false; + private static String deployPath = System.getProperty("user.dir"); + private static TDNodes tdNodes = new TDNodes(); + @BeforeClass public static void setupEnv() { try{ - String path = System.getProperty("user.dir"); - String bashPath = path + "/buildTDengine.sh"; + File file = new File(deployPath + "/../../../"); + String rootPath = file.getCanonicalPath(); + + tdNodes.setPath(rootPath); + tdNodes.setTestCluster(testCluster); - Process ps = Runtime.getRuntime().exec(bashPath); - ps.waitFor(); + tdNodes.deploy(1); + tdNodes.start(1); - BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream())); - while(br.readLine() != null) { - System.out.println(br.readLine()); - } } catch (Exception e) { e.printStackTrace(); + System.out.println("Base Test Exception"); } } + + @AfterClass + public static void cleanUpEnv() { + tdNodes.stop(1); + } } \ No newline at end of file diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py index 06d6a19462..3f0f315d33 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/linux/python3/taos/cursor.py @@ -1,6 +1,7 @@ from .cinterface import CTaosInterface from .error import * from .constants import FieldType +import threading # querySeqNum = 0 @@ -37,6 +38,7 @@ class TDengineCursor(object): self._block_iter = 0 self._affected_rows = 0 self._logfile = "" + self._threadId = threading.get_ident() if connection is not None: self._connection = connection @@ -103,6 +105,12 @@ class TDengineCursor(object): def execute(self, operation, params=None): """Prepare and execute a database operation (query or command). """ + # if threading.get_ident() != self._threadId: + # info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + if not operation: return None @@ -188,6 +196,11 @@ class TDengineCursor(object): def fetchall(self): """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. 
""" + # if threading.get_ident() != self._threadId: + # info ="[WARNING] Cursor fetchall:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetchall") @@ -232,6 +245,12 @@ class TDengineCursor(object): def _handle_result(self): """Handle the return result from query. """ + # if threading.get_ident() != self._threadId: + # info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + self._description = [] for ele in self._fields: self._description.append( diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index a633968616..04d3a6fd6d 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -109,6 +109,8 @@ void cqClose(void *handle) { while (pObj) { SCqObj *pTemp = pObj; pObj = pObj->next; + tdFreeSchema(pTemp->pSchema); + tfree(pTemp->sqlStr); free(pTemp); } @@ -242,6 +244,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { SCqObj *pObj = (SCqObj *)param; + if (tres == NULL && row == NULL) { + pObj->pStream = NULL; + return; + } SCqContext *pContext = pObj->pContext; STSchema *pSchema = pObj->pSchema; if (pObj->pStream == NULL) return; @@ -263,8 +269,14 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { void* val = row[i]; if (val == NULL) { val = getNullValue(c->type); - } else if (IS_VAR_DATA_TYPE(c->type)) { + } else if (c->type == TSDB_DATA_TYPE_BINARY) { val = ((char*)val) - sizeof(VarDataLenT); + } else if (c->type == TSDB_DATA_TYPE_NCHAR) { + char buf[TSDB_MAX_NCHAR_LEN]; + size_t len = taos_fetch_lengths(tres)[i]; + taosMbsToUcs4(val, len, buf, sizeof(buf), &len); + memcpy(val + sizeof(VarDataLenT), buf, len); + varDataLen(val) = len; } tdAppendColVal(trow, val, c->type, c->bytes, c->offset); } diff --git a/src/dnode/inc/dnodeMgmt.h b/src/dnode/inc/dnodeMgmt.h index 826f4ff1c1..092c06d84b 100644 --- a/src/dnode/inc/dnodeMgmt.h +++ b/src/dnode/inc/dnodeMgmt.h @@ -22,6 +22,8 @@ extern "C" { int32_t dnodeInitMgmt(); void dnodeCleanupMgmt(); +int32_t dnodeInitMgmtTimer(); +void dnodeCleanupMgmtTimer(); void dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg); void* dnodeGetVnode(int32_t vgId); diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index 90d857155a..b53c66e00c 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -131,8 +131,8 @@ static void dnodeFreeMnodeWriteMsg(SMnodeMsg *pWrite) { taosFreeQitem(pWrite); } -void dnodeSendRpcMnodeWriteRsp(void *pRaw, int32_t code) { - SMnodeMsg *pWrite = pRaw; +void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) { + SMnodeMsg *pWrite = pMsg; if (pWrite == NULL) return; if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return; if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) { diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 8d1ae0a50e..987a189959 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -57,6 +57,7 @@ static const SDnodeComponent tsDnodeComponents[] = { {"server", dnodeInitServer, dnodeCleanupServer}, {"mgmt", dnodeInitMgmt, dnodeCleanupMgmt}, {"modules", dnodeInitModules, dnodeCleanupModules}, + {"mgmt-tmr",dnodeInitMgmtTimer, dnodeCleanupMgmtTimer}, {"shell", dnodeInitShell, dnodeCleanupShell} }; diff --git 
a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 1ae1287888..b1c93d7195 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -106,6 +106,12 @@ int32_t dnodeInitMgmt() { } } + int32_t code = vnodeInitResources(); + if (code != TSDB_CODE_SUCCESS) { + dnodeCleanupMgmt(); + return -1; + } + // create the queue and thread to handle the message tsMgmtQset = taosOpenQset(); if (tsMgmtQset == NULL) { @@ -127,7 +133,7 @@ int32_t dnodeInitMgmt() { pthread_attr_init(&thAttr); pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - int32_t code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL); + code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL); pthread_attr_destroy(&thAttr); if (code != 0) { dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno)); @@ -141,6 +147,12 @@ int32_t dnodeInitMgmt() { return -1; } + dInfo("dnode mgmt is initialized"); + + return TSDB_CODE_SUCCESS; +} + +int32_t dnodeInitMgmtTimer() { tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM"); if (tsDnodeTmr == NULL) { dError("failed to init dnode timer"); @@ -149,13 +161,11 @@ int32_t dnodeInitMgmt() { } taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer); - - dInfo("dnode mgmt is initialized"); - + dInfo("dnode mgmt timer is initialized"); return TSDB_CODE_SUCCESS; } -void dnodeCleanupMgmt() { +void dnodeCleanupMgmtTimer() { if (tsStatusTimer != NULL) { taosTmrStopA(&tsStatusTimer); tsStatusTimer = NULL; @@ -165,7 +175,10 @@ void dnodeCleanupMgmt() { taosTmrCleanUp(tsDnodeTmr); tsDnodeTmr = NULL; } +} +void dnodeCleanupMgmt() { + dnodeCleanupMgmtTimer(); dnodeCloseVnodes(); if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset); @@ -282,13 +295,12 @@ static void *dnodeOpenVnode(void *param) { } static int32_t dnodeOpenVnodes() { - int32_t *vnodeList = calloc(TSDB_MAX_VNODES, sizeof(int32_t)); + int32_t vnodeList[TSDB_MAX_VNODES] = {0}; int32_t numOfVnodes = 0; int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes); if (status != TSDB_CODE_SUCCESS) { dInfo("get dnode list failed"); - free(vnodeList); return status; } @@ -334,7 +346,6 @@ static int32_t dnodeOpenVnodes() { free(pThread->vnodeList); } - free(vnodeList); free(threads); dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes); @@ -342,7 +353,7 @@ static int32_t dnodeOpenVnodes() { } void dnodeStartStream() { - int32_t vnodeList[TSDB_MAX_VNODES]; + int32_t vnodeList[TSDB_MAX_VNODES] = {0}; int32_t numOfVnodes = 0; int32_t status = vnodeGetVnodeList(vnodeList, &numOfVnodes); @@ -359,7 +370,7 @@ void dnodeStartStream() { } static void dnodeCloseVnodes() { - int32_t vnodeList[TSDB_MAX_VNODES]; + int32_t vnodeList[TSDB_MAX_VNODES]= {0}; int32_t numOfVnodes = 0; int32_t status; @@ -398,6 +409,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { void *pVnode = vnodeAcquireVnode(pCreate->cfg.vgId); if (pVnode != NULL) { + dDebug("vgId:%d, already exist, processed as alter msg", pCreate->cfg.vgId); int32_t code = vnodeAlter(pVnode, pCreate); vnodeRelease(pVnode); return code; diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 01f0cf25c0..971bd0a110 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -20,6 +20,7 @@ #include "tglobal.h" #include "dnodeInt.h" #include "dnodeMain.h" +#include "tfile.h" static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); static sem_t exitSem; @@ -67,6 +68,18 @@ int32_t main(int32_t argc, char 
*argv[]) { taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true); } } +#endif +#ifdef TAOS_RANDOM_FILE_FAIL + else if (strcmp(argv[i], "--random-file-fail-factor") == 0) { + if ( (i+1) < argc ) { + int factor = atoi(argv[i+1]); + printf("The factor of random failure is %d\n", factor); + taosSetRandomFileFailFactor(factor); + } else { + printf("Please specify a number for random failure factor!"); + exit(EXIT_FAILURE); + } + } #endif } diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index b20e6c9749..546e8cecb9 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -210,6 +210,7 @@ static void *dnodeProcessWriteQueue(void *param) { int32_t numOfMsgs; int type; void *pVnode, *item; + SRspRet *pRspRet; dDebug("write worker:%d is running", pWorker->workerId); @@ -222,9 +223,11 @@ static void *dnodeProcessWriteQueue(void *param) { for (int32_t i = 0; i < numOfMsgs; ++i) { pWrite = NULL; + pRspRet = NULL; taosGetQitem(pWorker->qall, &type, &item); if (type == TAOS_QTYPE_RPC) { pWrite = (SWriteMsg *)item; + pRspRet = &pWrite->rspRet; pHead = (SWalHead *)(pWrite->pCont - sizeof(SWalHead)); pHead->msgType = pWrite->rpcMsg.msgType; pHead->version = 0; @@ -234,7 +237,7 @@ static void *dnodeProcessWriteQueue(void *param) { pHead = (SWalHead *)item; } - int32_t code = vnodeProcessWrite(pVnode, type, pHead, item); + int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet); if (pWrite) pWrite->rpcMsg.code = code; } @@ -247,6 +250,11 @@ static void *dnodeProcessWriteQueue(void *param) { if (type == TAOS_QTYPE_RPC) { pWrite = (SWriteMsg *)item; dnodeSendRpcVnodeWriteRsp(pVnode, item, pWrite->rpcMsg.code); + } else if (type == TAOS_QTYPE_FWD) { + pHead = (SWalHead *)item; + vnodeConfirmForward(pVnode, pHead->version, 0); + taosFreeQitem(item); + vnodeRelease(pVnode); } else { taosFreeQitem(item); vnodeRelease(pVnode); diff --git a/src/inc/query.h b/src/inc/query.h index 88badc2d7b..c648270b21 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -87,8 +87,8 @@ int32_t qKillQuery(qinfo_t qinfo); void* qOpenQueryMgmt(int32_t vgId); void qSetQueryMgmtClosed(void* pExecutor); void qCleanupQueryMgmt(void* pExecutor); -void** qRegisterQInfo(void* pMgmt, void* qInfo); -void** qAcquireQInfo(void* pMgmt, void** key); +void** qRegisterQInfo(void* pMgmt, uint64_t qInfo); +void** qAcquireQInfo(void* pMgmt, uint64_t key); void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree); #ifdef __cplusplus diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 4a7d86c434..d2bef9ea57 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -129,9 +129,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "mnode clus TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "mnode accounts already exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "mnode invalid account") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_PARA, 0, 0x0342, "mnode invalid account parameter") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0343, "mnode invalid acct option") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_ACCTS, 0, 0x0344, "mnode too many accounts") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0342, "mnode invalid acct option") TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_ALREADY_EXIST, 0, 0x0350, "mnode user already exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER, 0, 0x0351, "mnode invalid user") @@ -145,7 +143,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, 0, 0x0361, "mnode inva 
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, 0, 0x0362, "mnode invalid table name") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, 0, 0x0363, "mnode invalid table type") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, 0, 0x0364, "mnode too many tags") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TABLES, 0, 0x0365, "mnode too many tables") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, 0, 0x0366, "mnode not enough time series") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, 0, 0x0367, "mnode no super table") // operation only available for super table TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, 0, 0x0368, "mnode column name too long") @@ -161,13 +158,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION, 0, 0x0382, "mnode inva TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, 0, 0x0383, "mnode invalid database") TAOS_DEFINE_ERROR(TSDB_CODE_MND_MONITOR_DB_FORBIDDEN, 0, 0x0384, "mnode monitor db forbidden") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, 0, 0x0385, "mnode too many databases") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, 0, 0x0386, "mnode db in dropping") // dnode TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "dnode message not processed") TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "dnode out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "dnode no disk write access") TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "dnode invalid message length") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_FILE_FORMAT, 0, 0x0404, "dnode invalid file format") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, "vnode action in progress") @@ -180,7 +177,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, 0, 0x0506, "vnode no d TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "vnode no such file or directory") TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "vnode out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "vnode app error") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0214, "vnode no write auth") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_STATUS, 0, 0x0510, "vnode not in ready state") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "vnode not in synced state") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "vnode no write auth") // tsdb TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "tsdb invalid table id") @@ -200,6 +199,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_ACTION, 0, 0x060D, "tsdb inval TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "tsdb invalid create table message") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table") // query TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 6155f08e76..ac89d1dabb 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -203,8 +203,7 @@ typedef struct SSubmitBlk { typedef struct SSubmitMsg { SMsgHead header; int32_t length; - int32_t compressed : 2; - int32_t numOfBlocks : 30; + int32_t numOfBlocks; SSubmitBlk blocks[]; } SSubmitMsg; @@ -285,6 +284,8 @@ typedef struct { int32_t tid; int16_t tversion; int16_t colId; + int8_t type; + int16_t bytes; int32_t tagValLen; int16_t numOfTags; int32_t 
schemaLen; @@ -472,7 +473,7 @@ typedef struct { typedef struct { int32_t code; - uint64_t qhandle; + uint64_t qhandle; // query handle } SQueryTableRsp; typedef struct { @@ -485,7 +486,7 @@ typedef struct SRetrieveTableRsp { int32_t numOfRows; int8_t completed; // all results are returned to client int16_t precision; - int64_t offset; // updated offset value for multi-vnode projection query + int64_t offset; // updated offset value for multi-vnode projection query int64_t useconds; char data[]; } SRetrieveTableRsp; diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index e2fec9b3dd..b8cc1768e8 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -108,12 +108,14 @@ void tsdbClearTableCfg(STableCfg *config); void* tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_t bytes); char* tsdbGetTableName(void *pTable); -STableId tsdbGetTableId(void *pTable); + +#define TSDB_TABLEID(_table) ((STableId*) (_table)) + STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg); int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg); int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId); -int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg); +int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg); TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid); void tsdbStartStream(TSDB_REPO_T *repo); @@ -233,9 +235,10 @@ bool tsdbNextDataBlock(TsdbQueryHandleT *pQueryHandle); * Get current data block information * * @param pQueryHandle + * @param pBlockInfo * @return */ -SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle); +void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle, SDataBlockInfo* pBlockInfo); /** * diff --git a/src/inc/vnode.h b/src/inc/vnode.h index a034bc5706..1e6cfa9700 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -60,7 +60,10 @@ void* vnodeGetWal(void *pVnode); int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item); int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); void vnodeBuildStatusMsg(void *param); +void vnodeConfirmForward(void *param, uint64_t version, int32_t code); void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes); + +int32_t vnodeInitResources(); void vnodeCleanupResources(); int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg); diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index ba123ac2d4..a440db7301 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -206,9 +206,10 @@ static void shellSourceFile(TAOS *con, char *fptr) { if (code != 0) { fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo); - /* free local resouce: allocated memory/metric-meta refcnt */ - taos_free_result(pSql); } + + /* free local resouce: allocated memory/metric-meta refcnt */ + taos_free_result(pSql); memset(cmd, 0, MAX_COMMAND_SIZE); cmd_len = 0; diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9a5aedcdb7..305302b71a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -520,9 +520,8 @@ int main(int argc, char *argv[]) { snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols); queryDB(taos, command); printf("meters created!\n"); - - taos_close(taos); } + taos_close(taos); /* Wait for table to create */ multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass); @@ -792,9 +791,6 @@ void 
* createTable(void *sarg) snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols); queryDB(winfo->taos, command); } - - taos_close(winfo->taos); - } else { /* Create all the tables; */ printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); @@ -812,7 +808,6 @@ void * createTable(void *sarg) } queryDB(winfo->taos, command); } - taos_close(winfo->taos); } return NULL; @@ -857,7 +852,6 @@ void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntable for (int i = 0; i < threads; i++) { info *t_info = infos + i; - taos_close(t_info->taos); sem_destroy(&(t_info->mutex_sem)); sem_destroy(&(t_info->lock_sem)); } @@ -875,6 +869,11 @@ void *readTable(void *sarg) { int64_t sTime = rinfo->start_time; char *tb_prefix = rinfo->tb_prefix; FILE *fp = fopen(rinfo->fp, "a"); + if (NULL == fp) { + printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); + return NULL; + } + int num_of_DPT = rinfo->nrecords_per_table; int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; int totalData = num_of_DPT * num_of_tables; @@ -930,6 +929,11 @@ void *readMetric(void *sarg) { TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; FILE *fp = fopen(rinfo->fp, "a"); + if (NULL == fp) { + printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); + return NULL; + } + int num_of_DPT = rinfo->nrecords_per_table; int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; int totalData = num_of_DPT * num_of_tables; diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 0b1890c5ab..70f18b32bd 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -27,19 +27,18 @@ #include #include #include +#include #include "taos.h" -#include "taosmsg.h" -#include "tsclient.h" #include "taosdef.h" +#include "taosmsg.h" +#include "tglobal.h" +#include "tsclient.h" +#include "tsdb.h" #include "tutil.h" -#include "tglobal.h" - #define COMMAND_SIZE 65536 -#define DEFAULT_DUMP_FILE "taosdump.sql" - -#define MAX_DBS 100 +//#define DEFAULT_DUMP_FILE "taosdump.sql" int converStringToReadable(char *str, int size, char *buf, int bufsize); int convertNCharToReadable(char *str, int size, char *buf, int bufsize); @@ -90,21 +89,21 @@ enum _describe_table_index { }; typedef struct { - char field[TSDB_COL_NAME_LEN]; + char field[TSDB_COL_NAME_LEN + 1]; char type[16]; int length; char note[128]; } SColDes; typedef struct { - char name[TSDB_COL_NAME_LEN]; + char name[TSDB_COL_NAME_LEN + 1]; SColDes cols[]; } STableDef; extern char version[]; typedef struct { - char name[TSDB_DB_NAME_LEN]; + char name[TSDB_DB_NAME_LEN + 1]; int32_t replica; int32_t days; int32_t keep; @@ -119,8 +118,8 @@ typedef struct { } SDbInfo; typedef struct { - char name[TSDB_TABLE_NAME_LEN]; - char metric[TSDB_TABLE_NAME_LEN]; + char name[TSDB_TABLE_NAME_LEN + 1]; + char metric[TSDB_TABLE_NAME_LEN + 1]; } STableRecord; typedef struct { @@ -128,6 +127,16 @@ typedef struct { STableRecord tableRecord; } STableRecordInfo; +typedef struct { + pthread_t threadID; + int32_t threadIndex; + int32_t totalThreads; + char dbName[TSDB_TABLE_NAME_LEN + 1]; + void *taosCon; +} SThreadParaObj; + +static int64_t totalDumpOutRows = 0; + SDbInfo **dbInfos = NULL; const char *argp_program_version = version; @@ -142,7 +151,7 @@ static char doc[] = ""; /* to force a line-break, e.g.\n<-- here."; */ /* A description of the arguments we accept. 
*/ -static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i input_file"; +static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i inpath\n-o outpath"; /* Keys for options without short-options. */ #define OPT_ABORT 1 /* –abort */ @@ -150,60 +159,68 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat /* The options we understand. */ static struct argp_option options[] = { // connection option - {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, - {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, - {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"cversion", 'v', "CVERION", 0, "client version", 0}, + {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, // input/output file - {"output", 'o', "OUTPUT", 0, "Output file name.", 1}, - {"input", 'i', "INPUT", 0, "Input file name.", 1}, - {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, - {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, + {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, + {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, + {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, // dump unit options - {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'B', 0, 0, "Dump assigned databases", 2}, // dump format options - {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, - {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, - {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, - {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, - {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, - {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, + {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, + {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"table-batch", 'T', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, + {"thread_num", 't', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, {0}}; /* Used by main to communicate with parse_opt. 
*/ -typedef struct SDumpArguments { +struct arguments { // connection option - char *host; - char *user; - char *password; - uint16_t port; + char *host; + char *user; + char *password; + uint16_t port; + char cversion[TSDB_FILENAME_LEN+1]; + uint16_t mysqlFlag; // output file - char output[TSDB_FILENAME_LEN]; - char input[TSDB_FILENAME_LEN]; - char *encode; + char outpath[TSDB_FILENAME_LEN+1]; + char inpath[TSDB_FILENAME_LEN+1]; + char *encode; // dump unit option - bool all_databases; - bool databases; + bool all_databases; + bool databases; // dump format option - bool schemaonly; - bool with_property; - int64_t start_time; - int64_t end_time; - int data_batch; - bool allow_sys; + bool schemaonly; + bool with_property; + int64_t start_time; + int64_t end_time; + int32_t data_batch; + int32_t table_batch; // num of table which will be dump into one output file. + bool allow_sys; // other options - int abort; - char **arg_list; - int arg_list_len; - bool isDumpIn; -} SDumpArguments; + int32_t thread_num; + int abort; + char **arg_list; + int arg_list_len; + bool isDumpIn; +}; /* Parse a single option. */ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Get the input argument from argp_parse, which we know is a pointer to our arguments structure. */ - SDumpArguments *arguments = state->input; + struct arguments *arguments = state->input; wordexp_t full_path; switch (key) { @@ -223,13 +240,24 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'P': arguments->port = atoi(arg); break; - // output file + case 'q': + arguments->mysqlFlag = atoi(arg); + break; + case 'v': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid client vesion %s\n", arg); + return -1; + } + strcpy(arguments->cversion, full_path.we_wordv[0]); + wordfree(&full_path); + break; + // output file path case 'o': if (wordexp(arg, &full_path, 0) != 0) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - tstrncpy(arguments->output, full_path.we_wordv[0], TSDB_FILENAME_LEN); + strcpy(arguments->outpath, full_path.we_wordv[0]); wordfree(&full_path); break; case 'i': @@ -238,7 +266,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - tstrncpy(arguments->input, full_path.we_wordv[0], TSDB_FILENAME_LEN); + strcpy(arguments->inpath, full_path.we_wordv[0]); wordfree(&full_path); break; case 'c': @@ -246,7 +274,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { fprintf(stderr, "Invalid path %s\n", arg); return -1; } - tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); + strcpy(configDir, full_path.we_wordv[0]); wordfree(&full_path); break; case 'e': @@ -276,13 +304,19 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'N': arguments->data_batch = atoi(arg); break; + case 'T': + arguments->table_batch = atoi(arg); + break; + case 't': + arguments->thread_num = atoi(arg); + break; case OPT_ABORT: arguments->abort = 1; break; case ARGP_KEY_ARG: - arguments->arg_list = &state->argv[state->next - 1]; + arguments->arg_list = &state->argv[state->next - 1]; arguments->arg_list_len = state->argc - state->next + 1; - state->next = state->argc; + state->next = state->argc; break; default: @@ -294,52 +328,70 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. 
*/ static struct argp argp = {options, parse_opt, args_doc, doc}; -TAOS *taos = NULL; -char *command = NULL; -char *lcommand = NULL; -char *buffer = NULL; - -int taosDumpOut(SDumpArguments *arguments); - -int taosDumpIn(SDumpArguments *arguments); - +int taosDumpOut(struct arguments *arguments); +int taosDumpIn(struct arguments *arguments); void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); - -int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp); - -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArguments *arguments, FILE *fp); - -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, SDumpArguments *arguments, - FILE *fp); - -int32_t taosDumpTable(char *table, char *metric, SDumpArguments *arguments, FILE *fp); - -int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp); - -int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments); - -int taosCheckParam(SDumpArguments *arguments); - +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp); +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp); +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon); +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon); +int taosCheckParam(struct arguments *arguments); void taosFreeDbInfos(); +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName); + +struct arguments tsArguments = { + // connection option + NULL, + "root", + "taosdata", + 0, + "", + 0, + // outpath and inpath + "", + "", + NULL, + // dump unit option + false, + false, + // dump format option + false, + false, + 0, + INT64_MAX, + 1, + 1, + false, + // other options + 5, + 0, + NULL, + 0, + false +}; + +int queryDB(TAOS *taos, char *command) { + TAOS_RES *pSql = NULL; + int32_t code = -1; + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (code) { + fprintf(stderr, "sql error: %s, reason:%s\n", command, taos_errstr(pSql)); + } + taos_free_result(pSql); + return code; +} int main(int argc, char *argv[]) { - SDumpArguments arguments = { - // connection option - NULL, TSDB_DEFAULT_USER, TSDB_DEFAULT_PASS, 0, - // output file - DEFAULT_DUMP_FILE, DEFAULT_DUMP_FILE, NULL, - // dump unit option - false, false, - // dump format option - false, false, 0, INT64_MAX, 1, false, - // other options - 0, NULL, 0, false}; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - argp_parse(&argp, argc, argv, 0, 0, &arguments); + argp_parse(&argp, argc, argv, 0, 0, &tsArguments); - if (arguments.abort) { + if (tsArguments.abort) { #ifndef _ALPINE error(10, 0, "ABORTED"); #else @@ -347,14 +399,48 @@ int main(int argc, char *argv[]) { #endif } - if (taosCheckParam(&arguments) < 0) { + printf("====== arguments config ======\n"); + { + printf("host: %s\n", tsArguments.host); + printf("user: %s\n", tsArguments.user); + printf("password: %s\n", tsArguments.password); + printf("port: %u\n", tsArguments.port); + printf("cversion: %s\n", tsArguments.cversion); + printf("mysqlFlag: %d", tsArguments.mysqlFlag); + printf("outpath: %s\n", tsArguments.outpath); + printf("inpath: %s\n", tsArguments.inpath); + printf("encode: %s\n", tsArguments.encode); + printf("all_databases: %d\n", tsArguments.all_databases); + printf("databases: %d\n", tsArguments.databases); + printf("schemaonly: %d\n", tsArguments.schemaonly); + printf("with_property: %d\n", tsArguments.with_property); + printf("start_time: %" PRId64 "\n", tsArguments.start_time); + printf("end_time: %" PRId64 "\n", tsArguments.end_time); + printf("data_batch: %d\n", tsArguments.data_batch); + printf("table_batch: %d\n", tsArguments.table_batch); + printf("allow_sys: %d\n", tsArguments.allow_sys); + printf("abort: %d\n", tsArguments.abort); + printf("isDumpIn: %d\n", tsArguments.isDumpIn); + printf("arg_list_len: %d\n", tsArguments.arg_list_len); + + for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { + printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + } + } + printf("==============================\n"); + + if (tsArguments.cversion[0] != 0){ + strcpy(version, tsArguments.cversion); + } + + if (taosCheckParam(&tsArguments) < 0) { exit(EXIT_FAILURE); } - if (arguments.isDumpIn) { - if (taosDumpIn(&arguments) < 0) return -1; + if (tsArguments.isDumpIn) { + if (taosDumpIn(&tsArguments) < 0) return -1; } else { - if (taosDumpOut(&arguments) < 0) return -1; + if (taosDumpOut(&tsArguments) < 0) return -1; } return 0; @@ -362,96 +448,214 @@ int main(int argc, char *argv[]) { void taosFreeDbInfos() { if (dbInfos == NULL) return; - for (int i = 0; i < MAX_DBS; i++) tfree(dbInfos[i]); + for (int i = 0; i < 128; i++) tfree(dbInfos[i]); tfree(dbInfos); } -int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo) { +// check table is normal table or super table +int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) { TAOS_ROW row = NULL; bool isSet = false; + TAOS_RES *result = NULL; memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); - sprintf(command, "show tables like %s", table); - TAOS_RES *result = taos_query(taos, command);\ - int32_t code = taos_errno(result); + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + sprintf(tempCommand, "show tables like %s", table); + + result = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(result); + if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); taos_free_result(result); return -1; } TAOS_FIELD *fields = taos_fetch_fields(result); - if ((row = taos_fetch_row(result)) != NULL) { + while ((row = taos_fetch_row(result)) != NULL) { isSet = true; pTableRecordInfo->isMetric = false; strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], 
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + break; } taos_free_result(result); result = NULL; - if (isSet) return 0; - - sprintf(command, "show stables like %s", table); - - result = taos_query(taos, command); + if (isSet) { + free(tempCommand); + return 0; + } + + sprintf(tempCommand, "show stables like %s", table); + + result = taos_query(taosCon, tempCommand); code = taos_errno(result); + if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); taos_free_result(result); return -1; } - if ((row = taos_fetch_row(result)) != NULL) { + while ((row = taos_fetch_row(result)) != NULL) { isSet = true; pTableRecordInfo->isMetric = true; - tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN); + strcpy(pTableRecordInfo->tableRecord.metric, table); + break; } taos_free_result(result); result = NULL; - if (isSet) return 0; - + if (isSet) { + free(tempCommand); + return 0; + } fprintf(stderr, "invalid table/metric %s\n", table); + free(tempCommand); return -1; } -int taosDumpOut(SDumpArguments *arguments) { - TAOS_ROW row; - TAOS_RES* result = NULL; - char *temp = NULL; - FILE *fp = NULL; - int count = 0; - STableRecordInfo tableRecordInfo; - fp = fopen(arguments->output, "w"); - if (fp == NULL) { - fprintf(stderr, "failed to open file %s\n", arguments->output); +int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) { + STableRecord tableRecord; + + if (-1 == *fd) { + *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (*fd == -1) { + fprintf(stderr, "failed to open temp file: .tables.tmp.0\n"); + return -1; + } + } + + memset(tableRecord.name, 0, sizeof(STableRecord)); + strcpy(tableRecord.name, meter); + strcpy(tableRecord.metric, metric); + + twrite(*fd, &tableRecord, sizeof(STableRecord)); + return 0; +} + + +int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); return -1; } - dbInfos = (SDbInfo **)calloc(MAX_DBS, sizeof(SDbInfo *)); + sprintf(tmpCommand, "select tbname from %s", metric); + + TAOS_RES *result = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(result); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t numOfTable = 0; + int32_t numOfThread = *totalNumOfThread; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(result)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(result); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + free(tmpCommand); + return -1; + } + + numOfThread++; + } + + memset(tableRecord.name, 
0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[0], fields[0].bytes); + strcpy(tableRecord.metric, metric); + + twrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + tclose(fd); + fd = -1; + } + } + tclose(fd); + fd = -1; + taos_free_result(result); + + *totalNumOfThread = numOfThread; + + free(tmpCommand); + + return 0; +} + +int taosDumpOut(struct arguments *arguments) { + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; + + TAOS_ROW row; + FILE *fp = NULL; + int32_t count = 0; + STableRecordInfo tableRecordInfo; + + char tmpBuf[TSDB_FILENAME_LEN+9] = {0}; + if (arguments->outpath[0] != 0) { + sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath); + } else { + sprintf(tmpBuf, "dbs.sql"); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpBuf); + return -1; + } + + dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); if (dbInfos == NULL) { fprintf(stderr, "failed to allocate memory\n"); goto _exit_failure; } - temp = (char *)malloc(2 * COMMAND_SIZE); - if (temp == NULL) { + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { fprintf(stderr, "failed to allocate memory\n"); goto _exit_failure; } - command = temp; - buffer = command + COMMAND_SIZE; - /* Connect to server */ taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); if (taos == NULL) { @@ -465,29 +669,30 @@ int taosDumpOut(SDumpArguments *arguments) { taosDumpCharset(fp); sprintf(command, "show databases"); - result = taos_query(taos, command); + result = taos_query(taos, command); int32_t code = taos_errno(result); + if (code != 0) { - fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result)); - taos_free_result(result); + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); goto _exit_failure; } TAOS_FIELD *fields = taos_fetch_fields(result); while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'monitor', but subsequent version changed to 'log' if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "monitor", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && (!arguments->allow_sys)) continue; - if (arguments->databases) { + if (arguments->databases) { // input multi dbs for (int i = 0; arguments->arg_list[i]; i++) { if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) goto _dump_db_point; } continue; - } else if (!arguments->all_databases) { + } else if (!arguments->all_databases) { // only input one db if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) goto _dump_db_point; @@ -504,19 +709,19 @@ int taosDumpOut(SDumpArguments *arguments) { } strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - if (strcmp(arguments->user, TSDB_DEFAULT_USER) == 0) { - dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); - dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); - dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); - dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); - dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); - dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); - 
dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); - dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); - dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); - dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - } + #if 0 + dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); + dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); + dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); + dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); + dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); + dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); + dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); + dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); + dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); + dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); +#endif count++; @@ -528,42 +733,71 @@ int taosDumpOut(SDumpArguments *arguments) { } } - // taos_free_result(result); - if (count == 0) { fprintf(stderr, "No databases valid to dump\n"); goto _exit_failure; } - if (arguments->databases || arguments->all_databases) { + if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases for (int i = 0; i < count; i++) { - (void)taosDumpDb(dbInfos[i], arguments, fp); + taosDumpDb(dbInfos[i], arguments, fp, taos); } } else { - if (arguments->arg_list_len == 1) { - (void)taosDumpDb(dbInfos[0], arguments, fp); - } else { + if (arguments->arg_list_len == 1) { // case: taosdump + taosDumpDb(dbInfos[0], arguments, fp, taos); + } else { // case: taosdump tablex tabley ... 
taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); sprintf(command, "use %s", dbInfos[0]->name); - if (taos_query(taos, command) == NULL ) { + + result = taos_query(taos, command); + int32_t code = taos_errno(result); + if (code != 0) { fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); goto _exit_failure; } fprintf(fp, "USE %s;\n\n", dbInfos[0]->name); + int32_t totalNumOfThread = 1; // 0: all normal tables into .tables.tmp.0 + int normalTblFd = -1; + int32_t retCode; for (int i = 1; arguments->arg_list[i]; i++) { - if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo) < 0) { - fprintf(stderr, "invalide table %s\n", arguments->arg_list[i]); + if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { + fprintf(stderr, "input the invalid table %s\n", arguments->arg_list[i]); continue; } - if (tableRecordInfo.isMetric) { // dump whole metric - (void)taosDumpMetric(tableRecordInfo.tableRecord.metric, arguments, fp); - } else { // dump MTable and NTable - (void)taosDumpTable(tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, arguments, fp); + if (tableRecordInfo.isMetric) { // dump all tables of this metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); + } else { + if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and its metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + } + retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); } + + if (retCode < 0) { + if (-1 != normalTblFd){ + tclose(normalTblFd); + } + goto _clean_tmp_file; + } + } + + if (-1 != normalTblFd){ + tclose(normalTblFd); + } + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + _clean_tmp_file: + for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); } } } @@ -572,21 +806,120 @@ fclose(fp); taos_close(taos); taos_free_result(result); - tfree(temp); - taosFreeDbInfos(); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); return 0; _exit_failure: fclose(fp); taos_close(taos); taos_free_result(result); - tfree(temp); + tfree(command); taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); return -1; } +int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon) { + TAOS_ROW row = NULL; + TAOS_RES *tmpResult = NULL; + int count = 0; + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tempCommand, "describe %s", table); + + tmpResult = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + taos_free_result(tmpResult); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + strcpy(tableDes->name, table); + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + 
strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + + count++; + } + + taos_free_result(tmpResult); + tmpResult = NULL; + + free(tempCommand); + + return count; +} + +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + + if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table + /* + count = taosGetTableDes(metric, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateTableClause(tableDes, count, fp); + + memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + */ + + count = taosGetTableDes(table, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateMTableClause(tableDes, metric, count, fp); + + } else { // dump table definition + count = taosGetTableDes(table, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateTableClause(tableDes, count, fp); + } + + free(tableDes); + + return taosDumpTableData(fp, table, arguments, taosCon); +} + void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { - char *pstr = buffer; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = tmpCommand; pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); if (isDumpProperty) { @@ -596,78 +929,304 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { dbInfo->ablocks, dbInfo->tblocks, dbInfo->ctime, dbInfo->clog, dbInfo->comp); } - fprintf(fp, "%s\n\n", buffer); + pstr += sprintf(pstr, ";"); + + fprintf(fp, "%s\n\n", tmpCommand); + free(tmpCommand); } -int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) { +void* taosDumpOutWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + STableRecord tableRecord; + int fd; + + char tmpFileName[TSDB_FILENAME_LEN*4] = {0}; + sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName); + return NULL; + } + + FILE *fp = NULL; + memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128); + + if (tsArguments.outpath[0] != 0) { + sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + } else { + sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); + } + + fp = fopen(tmpFileName, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpFileName); + return NULL; + } + + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, "use %s", pThread->dbName); + + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", pThread->dbName); + taos_free_result(tmpResult); + return NULL; + } + + fprintf(fp, "USE %s\n\n", pThread->dbName); + while 
(read(fd, &tableRecord, sizeof(STableRecord)) > 0) { + taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon); + } + + taos_free_result(tmpResult); + tclose(fd); + fclose(fp); + + return NULL; +} + +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName) +{ + pthread_attr_t thattr; + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); + for (int t = 0; t < numOfThread; ++t) { + SThreadParaObj *pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = numOfThread; + strcpy(pThread->dbName, dbName); + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed to connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int32_t t = 0; t < numOfThread; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int32_t t = 0; t < numOfThread; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + + +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + if (NULL == tableDes) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + count = taosGetTableDes(table, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + fprintf(stderr, "failed to get stable schema\n"); + exit(-1); + } + + taosDumpCreateTableClause(tableDes, count, fp); + + free(tableDes); + return 0; +} + + +int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) +{ + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + sprintf(tmpCommand, "use %s", dbName); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s, error: %s\n", dbName, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + taos_free_result(tmpResult); + + sprintf(tmpCommand, "show stables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, error: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".stables.tmp"); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + free(tmpCommand); + remove(".stables.tmp"); + exit(-1); + } + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], 
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + twrite(fd, &tableRecord, sizeof(STableRecord)); + } + + taos_free_result(tmpResult); + lseek(fd, 0, SEEK_SET); + + while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) { + (void)taosDumpStable(tableRecord.name, fp, taosCon); + } + + tclose(fd); + remove(".stables.tmp"); + + free(tmpCommand); + return 0; +} + + +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) { TAOS_ROW row; int fd = -1; STableRecord tableRecord; taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); - sprintf(command, "use %s", dbInfo->name); - if (taos_errno(taos_query(taos, command)) != 0) { - fprintf(stderr, "invalid database %s\n", dbInfo->name); + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); return -1; } + sprintf(tmpCommand, "use %s", dbInfo->name); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", dbInfo->name); + free(tmpCommand); + taos_free_result(tmpResult); + return -1; + } + taos_free_result(tmpResult); + fprintf(fp, "USE %s\n\n", dbInfo->name); + + (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); - sprintf(command, "show tables"); - TAOS_RES* result = taos_query(taos,command); - int32_t code = taos_errno(result); + sprintf(tmpCommand, "show tables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - taos_free_result(result); + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(tmpResult); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); - fd = open(".table.tmp", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file\n"); - taos_free_result(result); - return -1; - } + int32_t numOfTable = 0; + int32_t numOfThread = 0; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + free(tmpCommand); + return -1; + } - while ((row = taos_fetch_row(result)) != NULL) { + numOfThread++; + } + memset(&tableRecord, 0, sizeof(STableRecord)); strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); strncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); twrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + tclose(fd); + fd = -1; + } } + tclose(fd); + fd = -1; + taos_free_result(tmpResult); - taos_free_result(result); - - (void)lseek(fd, 0, SEEK_SET); - - STableRecord tableInfo; - while (1) { - memset(&tableInfo, 0, sizeof(STableRecord)); - ssize_t ret = read(fd, &tableInfo, sizeof(STableRecord)); - if (ret <= 0) break; - - 
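/* New dump-out flow for a database: table names returned by "show tables" are spread across
   .tables.tmp.<N> files, each holding at most table_batch records, and taosStartDumpOutWorkThreads()
   starts one worker thread per temp file. */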
tableInfo.name[sizeof(tableInfo.name) - 1] = 0; - tableInfo.metric[sizeof(tableInfo.metric) - 1] = 0; - taosDumpTable(tableInfo.name, tableInfo.metric, arguments, fp); + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name); + for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); } - - close(fd); - (void)remove(".table.tmp"); + + free(tmpCommand); return 0; } -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArguments *arguments, FILE *fp) { - char *pstr = NULL; - pstr = buffer; +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { int counter = 0; int count_temp = 0; - pstr += sprintf(buffer, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char* pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -699,19 +1258,27 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArgument } } - pstr += sprintf(pstr, ")"); + pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n\n", buffer); + fprintf(fp, "%s\n", tmpBuf); + + free(tmpBuf); } -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, SDumpArguments *arguments, - FILE *fp) { - char *pstr = NULL; - pstr = buffer; +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp) { int counter = 0; int count_temp = 0; - pstr += sprintf(buffer, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = NULL; + pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -721,54 +1288,6 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols count_temp = counter; for (; counter < numOfCols; counter++) { - TAOS_ROW row = NULL; - - sprintf(command, "select %s from %s limit 1", tableDes->cols[counter].field, tableDes->name); - - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - return; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - row = taos_fetch_row(result); - switch (fields[0].type) { - case TSDB_DATA_TYPE_BOOL: - sprintf(tableDes->cols[counter].note, "%d", ((((int)(*((char *)row[0]))) == 1) ? 
1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - sprintf(tableDes->cols[counter].note, "%d", (int)(*((char *)row[0]))); - break; - case TSDB_DATA_TYPE_SMALLINT: - sprintf(tableDes->cols[counter].note, "%d", (int)(*((short *)row[0]))); - break; - case TSDB_DATA_TYPE_INT: - sprintf(tableDes->cols[counter].note, "%d", *((int *)row[0])); - break; - case TSDB_DATA_TYPE_BIGINT: - sprintf(tableDes->cols[counter].note, "%" PRId64 "", *((int64_t *)row[0])); - break; - case TSDB_DATA_TYPE_FLOAT: - sprintf(tableDes->cols[counter].note, "%f", GET_FLOAT_VAL(row[0])); - break; - case TSDB_DATA_TYPE_DOUBLE: - sprintf(tableDes->cols[counter].note, "%f", GET_DOUBLE_VAL(row[0])); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - sprintf(tableDes->cols[counter].note, "%" PRId64 "", *(int64_t *)row[0]); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - default: - strncpy(tableDes->cols[counter].note, (char *)row[0], fields[0].bytes); - break; - } - - taos_free_result(result); - if (counter != count_temp) { if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { @@ -792,193 +1311,95 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols /* } */ } - pstr += sprintf(pstr, ")"); + pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n\n", buffer); + fprintf(fp, "%s\n", tmpBuf); + free(tmpBuf); } -int taosGetTableDes(char *table, STableDef *tableDes) { - TAOS_ROW row = NULL; - int count = 0; - - sprintf(command, "describe %s", table); - - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - taos_free_result(result); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); - - while ((row = taos_fetch_row(result)) != NULL) { - strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - - count++; - } - - taos_free_result(result); - result = NULL; - - return count; -} - -int32_t taosDumpTable(char *table, char *metric, SDumpArguments *arguments, FILE *fp) { - int count = 0; - - STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); - - if (metric != NULL && metric[0] != '\0') { // dump metric definition - count = taosGetTableDes(metric, tableDes); - - if (count < 0) { - free(tableDes); - return -1; - } - - taosDumpCreateTableClause(tableDes, count, arguments, fp); - - memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); - - count = taosGetTableDes(table, tableDes); - - if (count < 0) { - free(tableDes); - return -1; - } - - taosDumpCreateMTableClause(tableDes, metric, count, arguments, fp); - - } else { // dump table definition - count = taosGetTableDes(table, tableDes); - - if (count < 0) { - free(tableDes); - return -1; - } - - taosDumpCreateTableClause(tableDes, count, arguments, fp); - } - - free(tableDes); - - return taosDumpTableData(fp, table, arguments); -} - -int32_t taosDumpMetric(char *metric, SDumpArguments 
*arguments, FILE *fp) { - TAOS_ROW row = NULL; - int fd = -1; - STableRecord tableRecord; - - //tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - - sprintf(command, "select tbname from %s", metric); - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result)); - taos_free_result(result); - return -1; - } - - fd = open(".table.tmp", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd < 0) { - fprintf(stderr, "failed to open temp file"); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - while ((row = taos_fetch_row(result)) != NULL) { - memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); - tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - twrite(fd, &tableRecord, sizeof(STableRecord)); - } - - taos_free_result(result); - result = NULL; - - (void)lseek(fd, 0, SEEK_SET); - - //STableRecord tableInfo; - char tableName[TSDB_TABLE_NAME_LEN] ; - char metricName[TSDB_TABLE_NAME_LEN]; - ssize_t ret; - while (1) { - //memset(&tableInfo, 0, sizeof(STableRecord)); - memset(tableName, 0, TSDB_TABLE_NAME_LEN); - memset(metricName, 0, TSDB_TABLE_NAME_LEN); - //ssize_t ret = read(fd, &tableInfo, sizeof(STableRecord)); - //if (ret <= 0) break; - ret = read(fd, tableName, TSDB_TABLE_NAME_LEN); - if (ret <= 0) break; - - ret = read(fd, metricName, TSDB_TABLE_NAME_LEN); - if (ret <= 0) break; - - //tableInfo.name[sizeof(tableInfo.name) - 1] = 0; - //tableInfo.metric[sizeof(tableInfo.metric) - 1] = 0; - //taosDumpTable(tableInfo.name, tableInfo.metric, arguments, fp); - //tstrncpy(tableName, tableInfo.name, TSDB_TABLE_NAME_LEN-1); - //tstrncpy(metricName, tableInfo.metric, TSDB_TABLE_NAME_LEN-1); - taosDumpTable(tableName, metricName, arguments, fp); - } - - close(fd); - (void)remove(".table.tmp"); - - return 0; -} - -int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon) { /* char temp[MAX_COMMAND_SIZE] = "\0"; */ + int64_t totalRows = 0; int count = 0; char *pstr = NULL; TAOS_ROW row = NULL; int numFields = 0; char *tbuf = NULL; - if (arguments->schemaonly) return 0; - - sprintf(command, "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", tbname, arguments->start_time, - arguments->end_time); - - TAOS_RES* result = taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(result)); - taos_free_result(result); + char* tmpCommand = (char *)calloc(1, COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); return -1; } - numFields = taos_field_count(result); + char* tmpBuffer = (char *)calloc(1, COMMAND_SIZE); + if (tmpBuffer == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + free(tmpCommand); + return -1; + } + + pstr = tmpBuffer; + + if (arguments->schemaonly) { + free(tmpCommand); + free(tmpBuffer); + return 0; + } + + sprintf(tmpCommand, + "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", + tbname, + arguments->start_time, + arguments->end_time); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, reason: %s\n", tmpCommand, 
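/* Row data is re-read with a full range scan on _c0 and written back out as INSERT statements
   (when arguments->mysqlFlag is set) or IMPORT statements otherwise, with a ';' terminator
   emitted after every data_batch rows. */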
taos_errstr(taosCon)); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); + return -1; + } + + numFields = taos_field_count(taosCon); assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); tbuf = (char *)malloc(COMMAND_SIZE); if (tbuf == NULL) { fprintf(stderr, "No enough memory\n"); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); return -1; } + char sqlStr[8] = "\0"; + if (arguments->mysqlFlag) { + sprintf(sqlStr, "INSERT"); + } else { + sprintf(sqlStr, "IMPORT"); + } + + int rowFlag = 0; count = 0; - while ((row = taos_fetch_row(result)) != NULL) { - pstr = buffer; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + pstr = tmpBuffer; if (count == 0) { - pstr += sprintf(pstr, "INSERT INTO %s VALUES (", tbname); - } else { - pstr += sprintf(pstr, "("); + pstr += sprintf(pstr, "%s INTO %s VALUES (", sqlStr, tbname); + } else { + if (arguments->mysqlFlag) { + if (0 == rowFlag) { + pstr += sprintf(pstr, "("); + rowFlag++; + } else { + pstr += sprintf(pstr, ", ("); + } + } else { + pstr += sprintf(pstr, "("); + } } for (int col = 0; col < numFields; col++) { @@ -1003,7 +1424,7 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { pstr += sprintf(pstr, "%d", *((int *)row[col])); break; case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, "%" PRId64, *((int64_t *)row[col])); + pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col])); break; case TSDB_DATA_TYPE_FLOAT: pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); @@ -1022,34 +1443,52 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { pstr += sprintf(pstr, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, "%" PRId64, *(int64_t *)row[col]); + if (!arguments->mysqlFlag) { + pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + pstr += sprintf(pstr, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } break; default: break; } } - sprintf(pstr, ")"); + pstr += sprintf(pstr, ") "); + + totalRows++; count++; - fprintf(fp, "%s", buffer); + fprintf(fp, "%s", tmpBuffer); if (count >= arguments->data_batch) { - fprintf(fp, "\n"); + fprintf(fp, ";\n"); count = 0; - } else { - fprintf(fp, "\\\n"); - } + } //else { + //fprintf(fp, "\\\n"); + //} } + atomic_add_fetch_64(&totalDumpOutRows, totalRows); + fprintf(fp, "\n"); - if (tbuf) free(tbuf); - taos_free_result(result); - result = NULL; + if (tbuf) { + free(tbuf); + } + + taos_free_result(tmpResult); + tmpResult = NULL; + free(tmpCommand); + free(tmpBuffer); return 0; } -int taosCheckParam(SDumpArguments *arguments) { +int taosCheckParam(struct arguments *arguments) { if (arguments->all_databases && arguments->databases) { fprintf(stderr, "conflict option --all-databases and --databases\n"); return -1; @@ -1059,19 +1498,25 @@ int taosCheckParam(SDumpArguments *arguments) { fprintf(stderr, "start time is larger than end time\n"); return -1; } + if (arguments->arg_list_len == 0) { if ((!arguments->all_databases) && (!arguments->isDumpIn)) { fprintf(stderr, "taosdump requires parameters\n"); return -1; } } - - if (arguments->isDumpIn && (strcmp(arguments->output, DEFAULT_DUMP_FILE) != 0)) { - fprintf(stderr, "duplicate parameter input and output file\n"); +/* + if (arguments->isDumpIn && (strcmp(arguments->outpath, 
DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } +*/ + if (!arguments->isDumpIn && arguments->encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); return -1; } - if (!arguments->isDumpIn && arguments->encode != NULL) { + if (arguments->table_batch <= 0) { fprintf(stderr, "invalid option in dump out\n"); return -1; } @@ -1134,177 +1579,6 @@ void taosReplaceCtrlChar(char *str) { *pstr = '\0'; } -int taosDumpIn(SDumpArguments *arguments) { - assert(arguments->isDumpIn); - - int tsize = 0; - FILE * fp = NULL; - char * line = NULL; - _Bool isRun = true; - size_t line_size = 0; - char * pstr = NULL, *lstr = NULL; - iconv_t cd = (iconv_t)-1; - size_t inbytesleft = 0; - size_t outbytesleft = COMMAND_SIZE; - char fcharset[64]; - char * tcommand = NULL; - - fp = fopen(arguments->input, "r"); - if (fp == NULL) { - fprintf(stderr, "failed to open input file %s\n", arguments->input); - return -1; - } - - taosLoadFileCharset(fp, fcharset); - - taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); - if (taos == NULL) { - fprintf(stderr, "failed to connect to TDengine server\n"); - goto _dumpin_exit_failure; - } - - command = (char *)malloc(COMMAND_SIZE); - lcommand = (char *)malloc(COMMAND_SIZE); - if (command == NULL || lcommand == NULL) { - fprintf(stderr, "failed to connect to allocate memory\n"); - goto _dumpin_exit_failure; - } - - // Resolve locale - if (*fcharset != '\0') { - arguments->encode = fcharset; - } - - if (arguments->encode != NULL && strcasecmp(tsCharset, arguments->encode) != 0) { - cd = iconv_open(tsCharset, arguments->encode); - if (cd == (iconv_t)-1) { - fprintf(stderr, "Failed to open iconv handle\n"); - goto _dumpin_exit_failure; - } - } - - pstr = command; - int64_t linenu = 0; - while (1) { - ssize_t size = getline(&line, &line_size, fp); - linenu++; - if (size <= 0) break; - if (size == 1) { - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != (iconv_t)-1) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(tcommand); - - TAOS_RES* result = taos_query(taos, tcommand); - if (taos_errno(result) != 0){ - fprintf(stderr, "linenu: %" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, - taos_errstr(result)); - taos_free_result(result); - } - - pstr = command; - pstr[0] = '\0'; - tsize = 0; - isRun = true; - } - - continue; - } - - /* if (line[0] == '-' && line[1] == '-') continue; */ - - line[size - 1] = 0; - - if (tsize + size - 1 > COMMAND_SIZE) { - fprintf(stderr, "command is too long\n"); - goto _dumpin_exit_failure; - } - - if (line[size - 2] == '\\') { - line[size - 2] = ' '; - isRun = false; - } else { - isRun = true; - } - - memcpy(pstr, line, size - 1); - pstr += (size - 1); - *pstr = '\0'; - - if (!isRun) continue; - - if (command != pstr && !isEmptyCommand(command)) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != (iconv_t)-1) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(tcommand); - TAOS_RES* result = taos_query(taos, tcommand); - int32_t code = taos_errno(result); - if (code != 0) - { - fprintf(stderr, "linenu:%" 
PRId64 " failed to run command %s reason: %s \ncontinue...\n", linenu, command, - taos_errstr(result)); - } - taos_free_result(result); - } - - pstr = command; - *pstr = '\0'; - tsize = 0; - } - - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != (iconv_t)-1) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(lcommand); - if (taos_query(taos, tcommand) == NULL) - fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, - taos_errstr(taos)); - } - - if (cd != (iconv_t)-1) iconv_close(cd); - tfree(line); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return 0; - -_dumpin_exit_failure: - if (cd != (iconv_t)-1) iconv_close(cd); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return -1; -} - char *ascii_literal_list[] = { "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", @@ -1351,7 +1625,7 @@ int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { if ((int)wc < 256) { pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); - } else if (byte_width > 0) { + } else { memcpy(pbuf, pstr, byte_width); pbuf += byte_width; } @@ -1399,3 +1673,453 @@ _exit_no_charset: tfree(line); return; } + +// ======== dumpIn support multi threads functions ================================// + +static char **tsDumpInSqlFiles = NULL; +static int32_t tsSqlFileNum = 0; +static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0}; +static char tsfCharset[64] = {0}; +static int taosGetFilesNum(const char *directoryName, const char *prefix) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + if (fscanf(fp, "%d", &fileNum) != 1) { + fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd); + exit(0); + } + + if (fileNum <= 0) { + fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName); + exit(0); + } + + pclose(fp); + return fileNum; +} + +static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + while (fscanf(fp, "%s", fileArray[fileNum++])) { + if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) { + fileNum--; + } + if (fileNum >= totalFiles) { + break; + } + } + + if (fileNum != totalFiles) { + fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName); + exit(0); + } + + pclose(fp); +} + +static void taosCheckTablesSQLFile(const char *directoryName) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/dbs.sql", directoryName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + while (fscanf(fp, "%s", tsDbSqlFile)) { + break; + } + + pclose(fp); +} + +static void 
taosMallocSQLFiles()
+{
+  tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*));
+  for (int i = 0; i < tsSqlFileNum; i++) {
+    tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN);
+  }
+}
+
+static void taosFreeSQLFiles()
+{
+  for (int i = 0; i < tsSqlFileNum; i++) {
+    tfree(tsDumpInSqlFiles[i]);
+  }
+  tfree(tsDumpInSqlFiles);
+}
+
+static void taosGetDirectoryFileList(char *inputDir)
+{
+  struct stat fileStat;
+  if (stat(inputDir, &fileStat) < 0) {
+    fprintf(stderr, "ERROR: %s does not exist\n", inputDir);
+    exit(0);
+  }
+
+  if (fileStat.st_mode & S_IFDIR) {
+    taosCheckTablesSQLFile(inputDir);
+    tsSqlFileNum = taosGetFilesNum(inputDir, "sql");
+    int totalSQLFileNum = tsSqlFileNum;
+    if (tsDbSqlFile[0] != 0) {
+      tsSqlFileNum--;
+    }
+    taosMallocSQLFiles();
+    taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNum);
+    fprintf(stdout, "\nstart to process %d files in %s\n", totalSQLFileNum, inputDir);
+  }
+  else {
+    fprintf(stderr, "ERROR: %s is not a directory\n", inputDir);
+    exit(0);
+  }
+}
+
+static FILE* taosOpenDumpInFile(char *fptr) {
+  wordexp_t full_path;
+
+  if (wordexp(fptr, &full_path, 0) != 0) {
+    fprintf(stderr, "ERROR: illegal file name: %s\n", fptr);
+    return NULL;
+  }
+
+  char *fname = full_path.we_wordv[0];
+
+  if (access(fname, F_OK) != 0) {
+    fprintf(stderr, "ERROR: file %s does not exist\n", fptr);
+
+    wordfree(&full_path);
+    return NULL;
+  }
+
+  if (access(fname, R_OK) != 0) {
+    fprintf(stderr, "ERROR: file %s is not readable\n", fptr);
+
+    wordfree(&full_path);
+    return NULL;
+  }
+
+  FILE *f = fopen(fname, "r");
+  if (f == NULL) {
+    fprintf(stderr, "ERROR: failed to open file %s\n", fname);
+    wordfree(&full_path);
+    return NULL;
+  }
+
+  wordfree(&full_path);
+
+  return f;
+}
+
+int taosDumpInOneFile_old(TAOS * taos, FILE* fp, char* fcharset, char* encode) {
+  char     *command = NULL;
+  char     *lcommand = NULL;
+  int       tsize = 0;
+  char     *line = NULL;
+  _Bool     isRun = true;
+  size_t    line_size = 0;
+  char     *pstr = NULL;
+  char     *lstr = NULL;
+  size_t    inbytesleft = 0;
+  size_t    outbytesleft = COMMAND_SIZE;
+  char     *tcommand = NULL;
+  char     *charsetOfFile = NULL;
+  iconv_t   cd = (iconv_t)(-1);
+
+  command = (char *)malloc(COMMAND_SIZE);
+  lcommand = (char *)malloc(COMMAND_SIZE);
+  if (command == NULL || lcommand == NULL) {
+    fprintf(stderr, "failed to allocate memory\n");
+    goto _dumpin_exit_failure;
+  }
+
+  // Resolve locale
+  if (*fcharset != '\0') {
+    charsetOfFile = fcharset;
+  } else {
+    charsetOfFile = encode;
+  }
+
+  if (charsetOfFile != NULL && strcasecmp(tsCharset, charsetOfFile) != 0) {
+    cd = iconv_open(tsCharset, charsetOfFile);
+    if (cd == ((iconv_t)(-1))) {
+      fprintf(stderr, "Failed to open iconv handle\n");
+      goto _dumpin_exit_failure;
+    }
+  }
+
+  pstr = command;
+  int64_t linenu = 0;
+  while (1) {
+    ssize_t size = getline(&line, &line_size, fp);
+    linenu++;
+    if (size <= 0) break;
+    if (size == 1) {
+      if (pstr != command) {
+        inbytesleft = pstr - command;
+        memset(lcommand, 0, COMMAND_SIZE);
+        pstr = command;
+        lstr = lcommand;
+        outbytesleft = COMMAND_SIZE;
+        if (cd != ((iconv_t)(-1))) {
+          iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft);
+          tcommand = lcommand;
+        } else {
+          tcommand = command;
+        }
+
+        taosReplaceCtrlChar(tcommand);
+
+        if (queryDB(taos, tcommand) != 0) {
+          fprintf(stderr, "error sql: linenu: %" PRId64 " failed\n", linenu);
+          exit(0);
+        }
+
+        pstr = command;
+        pstr[0] = '\0';
+        tsize = 0;
+        isRun = true;
+      }
+
+      continue;
+    }
+
+    /* if (line[0] == '-' && line[1] == '-') continue; */
+
+    line[size - 1] = 0;
+
+    if (tsize + size - 1 >
COMMAND_SIZE) { + fprintf(stderr, "command is too long\n"); + goto _dumpin_exit_failure; + } + + if (line[size - 2] == '\\') { + line[size - 2] = ' '; + isRun = false; + } else { + isRun = true; + } + + memcpy(pstr, line, size - 1); + pstr += (size - 1); + *pstr = '\0'; + + if (!isRun) continue; + + if (command != pstr && !isEmptyCommand(command)) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(tcommand); + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu:%" PRId64 " failed\n", linenu); + exit(0); + } + } + + pstr = command; + *pstr = '\0'; + tsize = 0; + } + + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(lcommand); + if (queryDB(taos, tcommand) != 0) + fprintf(stderr, "error sql: linenu:%" PRId64 " failed \n", linenu); + } + + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(line); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return 0; + +_dumpin_exit_failure: + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return -1; +} + +int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + cmd = (char *)malloc(COMMAND_SIZE); + if (cmd == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + int lineNo = 0; + while ((read_len = getline(&line, &line_len, fp)) != -1) { + ++lineNo; + if (read_len >= COMMAND_SIZE) continue; + line[--read_len] = '\0'; + + //if (read_len == 0 || isCommentLine(line)) { // line starts with # + if (read_len == 0 ) { + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + if (queryDB(taos, cmd)) { + fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + } + + memset(cmd, 0, COMMAND_SIZE); + cmd_len = 0; + } + + tfree(cmd); + tfree(line); + fclose(fp); + return 0; +} + +void* taosDumpInWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + for (int32_t f = 0; f < tsSqlFileNum; ++f) { + if (f % pThread->totalThreads == pThread->threadIndex) { + char *SQLFileName = tsDumpInSqlFiles[f]; + FILE* fp = taosOpenDumpInFile(SQLFileName); + if (NULL == fp) { + continue; + } + fprintf(stderr, "Success Open input file: %s\n", SQLFileName); + taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName); + } + } + + return NULL; +} + +static void taosStartDumpInWorkThreads(struct arguments *args) +{ + pthread_attr_t thattr; + SThreadParaObj *pThread; + int32_t totalThreads = args->thread_num; + + if (totalThreads > tsSqlFileNum) { + totalThreads = tsSqlFileNum; + } + + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj)); + for (int32_t t = 0; t < totalThreads; ++t) { + pThread = threadObj + t; + 
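    /* Each dump-in worker opens its own TAOS connection; SQL files are assigned to workers by
       file index modulo totalThreads (see taosDumpInWorkThreadFp above). */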
pThread->threadIndex = t; + pThread->totalThreads = totalThreads; + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int t = 0; t < totalThreads; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int t = 0; t < totalThreads; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + +int taosDumpIn(struct arguments *arguments) { + assert(arguments->isDumpIn); + + TAOS *taos = NULL; + FILE *fp = NULL; + + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDengine server\n"); + return -1; + } + + taosGetDirectoryFileList(arguments->inpath); + + if (tsDbSqlFile[0] != 0) { + fp = taosOpenDumpInFile(tsDbSqlFile); + if (NULL == fp) { + fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); + return -1; + } + fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile); + + taosLoadFileCharset(fp, tsfCharset); + + taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); + } + + taosStartDumpInWorkThreads(arguments); + + taos_close(taos); + taosFreeSQLFiles(); + return 0; +} + + diff --git a/src/kit/taosmigrate/taosmigrate.c b/src/kit/taosmigrate/taosmigrate.c index b7bf6fc1ba..b80ca44a10 100644 --- a/src/kit/taosmigrate/taosmigrate.c +++ b/src/kit/taosmigrate/taosmigrate.c @@ -40,7 +40,7 @@ struct arguments { static error_t parse_opt(int key, char *arg, struct argp_state *state) { struct arguments *arguments = state->input; switch (key) { - case 'w': + case 'r': arguments->dataDir = arg; break; case 'd': @@ -51,6 +51,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { break; case 'f': arguments->fqdn = arg; + break; case 'g': arguments->dnodeGroups = arg; break; diff --git a/src/kit/taosmigrate/taosmigrateMnodeWal.c b/src/kit/taosmigrate/taosmigrateMnodeWal.c index 6315ff99f7..28e2b7772b 100644 --- a/src/kit/taosmigrate/taosmigrateMnodeWal.c +++ b/src/kit/taosmigrate/taosmigrateMnodeWal.c @@ -96,6 +96,7 @@ void walModWalFile(char* walfile) { if (wfd < 0) { printf("wal:%s, failed to open(%s)\n", newWalFile, strerror(errno)); free(buffer); + close(rfd); return ; } @@ -116,6 +117,11 @@ void walModWalFile(char* walfile) { break; } + if (pHead->len >= 1024000 - sizeof(SWalHead)) { + printf("wal:%s, SWalHead.len(%d) overflow, skip the rest of file\n", walfile, pHead->len); + break; + } + ret = read(rfd, pHead->cont, pHead->len); if ( ret != pHead->len) { printf("wal:%s, failed to read body, skip, len:%d ret:%d\n", walfile, pHead->len, ret); diff --git a/src/kit/taosmigrate/taosmigrateVnodeCfg.c b/src/kit/taosmigrate/taosmigrateVnodeCfg.c index 1cb2fee353..b925fb10aa 100644 --- a/src/kit/taosmigrate/taosmigrateVnodeCfg.c +++ b/src/kit/taosmigrate/taosmigrateVnodeCfg.c @@ -99,6 +99,8 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile) goto PARSE_OVER; } + content[maxLen] = (char)0; + root = cJSON_Parse(content); if (root == NULL) { printf("failed to json parse %s, invalid json 
format\n", cfgFile); diff --git a/src/mnode/inc/mnodeMnode.h b/src/mnode/inc/mnodeMnode.h index c75deac594..1060907234 100644 --- a/src/mnode/inc/mnodeMnode.h +++ b/src/mnode/inc/mnodeMnode.h @@ -44,6 +44,7 @@ void mnodeDecMnodeRef(struct SMnodeObj *pMnode); char * mnodeGetMnodeRoleStr(); void mnodeGetMnodeIpSetForPeer(SRpcIpSet *ipSet); void mnodeGetMnodeIpSetForShell(SRpcIpSet *ipSet); +char* mnodeGetMnodeMasterEp(); void mnodeGetMnodeInfos(void *mnodes); void mnodeUpdateMnodeIpSet(); diff --git a/src/mnode/inc/mnodeSdb.h b/src/mnode/inc/mnodeSdb.h index ca2fffe24c..eec6d45e23 100644 --- a/src/mnode/inc/mnodeSdb.h +++ b/src/mnode/inc/mnodeSdb.h @@ -53,6 +53,7 @@ typedef struct { void * rowData; int32_t rowSize; int32_t retCode; // for callback in sdb queue + int32_t processedCount; // for sync fwd callback int32_t (*cb)(struct SMnodeMsg *pMsg, int32_t code); struct SMnodeMsg *pMsg; } SSdbOper; diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c index d0c0f01d63..2d6e1ae007 100644 --- a/src/mnode/src/mnodeAcct.c +++ b/src/mnode/src/mnodeAcct.c @@ -128,6 +128,7 @@ int32_t mnodeInitAccts() { void mnodeCleanupAccts() { acctCleanUp(); sdbCloseTable(tsAcctSdb); + tsAcctSdb = NULL; } void *mnodeGetAcct(char *name) { diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 69821e3483..8c74c9413d 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -179,9 +179,14 @@ void mnodeDecDbRef(SDbObj *pDb) { SDbObj *mnodeGetDbByTableId(char *tableId) { char db[TSDB_TABLE_ID_LEN], *pos; - + + // tableId format should be : acct.db.table pos = strstr(tableId, TS_PATH_DELIMITER); + assert(NULL != pos); + pos = strstr(pos + 1, TS_PATH_DELIMITER); + assert(NULL != pos); + memset(db, 0, sizeof(db)); strncpy(db, tableId, pos - tableId); @@ -459,6 +464,7 @@ void mnodeMoveVgroupToHead(SVgObj *pVgroup) { void mnodeCleanupDbs() { sdbCloseTable(tsDbSdb); + tsDbSdb = NULL; } static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { @@ -964,6 +970,11 @@ static int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg) { mError("db:%s, failed to alter, invalid db", pAlter->db); return TSDB_CODE_MND_INVALID_DB; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pAlter->db, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } return mnodeAlterDb(pMsg->pDb, pAlter, pMsg); } diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index c1b8256a06..29272fbd4f 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -88,13 +88,13 @@ static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) { } static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) { - SDnodeObj *pDnode = pOper->pObj; - SDnodeObj *pSaved = mnodeGetDnode(pDnode->dnodeId); - if (pSaved != NULL && pDnode != pSaved) { - memcpy(pSaved, pDnode, pOper->rowSize); - free(pDnode); - mnodeDecDnodeRef(pSaved); + SDnodeObj *pNew = pOper->pObj; + SDnodeObj *pDnode = mnodeGetDnode(pNew->dnodeId); + if (pDnode != NULL && pNew != pDnode) { + memcpy(pDnode, pNew, pOper->rowSize); + free(pNew); } + mnodeDecDnodeRef(pDnode); return TSDB_CODE_SUCCESS; } @@ -176,6 +176,7 @@ int32_t mnodeInitDnodes() { void mnodeCleanupDnodes() { sdbCloseTable(tsDnodeSdb); + tsDnodeSdb = NULL; } void *mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode) { @@ -334,7 +335,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { if (pStatus->dnodeId == 0) { mDebug("dnode:%d %s, first access", pDnode->dnodeId, pDnode->dnodeEp); } else { - 
//mDebug("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); + mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); } int32_t openVnodes = htons(pStatus->openVnodes); @@ -468,7 +469,7 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { } mnodeDecDnodeRef(pDnode); - if (strcmp(pDnode->dnodeEp, dnodeGetMnodeMasterEp()) == 0) { + if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) { mError("dnode:%d, can't drop dnode:%s which is master", pDnode->dnodeId, ep); return TSDB_CODE_MND_NO_REMOVE_MASTER; } diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index ba0089c61e..042e356442 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -41,7 +41,7 @@ typedef struct { void (*cleanup)(); } SMnodeComponent; -void *tsMnodeTmr; +void *tsMnodeTmr = NULL; static bool tsMgmtIsRunning = false; static const SMnodeComponent tsMnodeComponents[] = { @@ -121,9 +121,9 @@ void mnodeCleanupSystem() { dnodeFreeMnodeWqueue(); dnodeFreeMnodeRqueue(); dnodeFreeMnodePqueue(); - mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); mnodeCleanupTimer(); - + mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); + mInfo("mnode is cleaned up"); } diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index 4d785dd062..f74de2b325 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -165,6 +165,7 @@ int32_t mnodeInitMnodes() { void mnodeCleanupMnodes() { sdbCloseTable(tsMnodeSdb); + tsMnodeSdb = NULL; mnodeMnodeDestroyLock(); } @@ -267,6 +268,10 @@ void mnodeGetMnodeIpSetForShell(SRpcIpSet *ipSet) { mnodeMnodeUnLock(); } +char* mnodeGetMnodeMasterEp() { + return tsMnodeInfos.nodeInfos[tsMnodeInfos.inUse].nodeEp; +} + void mnodeGetMnodeInfos(void *mnodeInfos) { mnodeMnodeRdLock(); *(SDMMnodeInfos *)mnodeInfos = tsMnodeInfos; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 761dce6720..cdcb7357f1 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -72,8 +72,6 @@ typedef struct { void * sync; void * wal; SSyncCfg cfg; - sem_t sem; - int32_t code; int32_t numOfTables; SSdbTable *tableList[SDB_TABLE_MAX]; pthread_mutex_t mutex; @@ -201,7 +199,7 @@ static void sdbRestoreTables() { sdbDebug("table:%s, is restored, numOfRows:%" PRId64, pTable->tableName, pTable->numOfRows); } - sdbInfo("sdb is restored, version:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables); + sdbInfo("sdb is restored, ver:%" PRId64 " totalRows:%d numOfTables:%d", tsSdbObj.version, totalRows, numOfTables); } void sdbUpdateMnodeRoles() { @@ -244,27 +242,36 @@ static void sdbNotifyRole(void *ahandle, int8_t role) { sdbUpdateMnodeRoles(); } +FORCE_INLINE static void sdbConfirmForward(void *ahandle, void *param, int32_t code) { - tsSdbObj.code = code; - sem_post(&tsSdbObj.sem); - sdbDebug("forward request confirmed, version:%" PRIu64 ", result:%s", (int64_t)param, tstrerror(code)); -} + assert(param); + SSdbOper * pOper = param; + SMnodeMsg *pMsg = pOper->pMsg; + if (code <= 0) pOper->retCode = code; - static int32_t sdbForwardToPeer(SWalHead *pHead) { - if (tsSdbObj.sync == NULL) return TSDB_CODE_SUCCESS; + int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1); + if (processedCount <= 1) { + if (pMsg != NULL) { + sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d", pMsg->rpcMsg.ahandle, pMsg, processedCount); + } + return; + } - int32_t 
code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version, TAOS_QTYPE_RPC); - if (code > 0) { - sdbDebug("forward request is sent, version:%" PRIu64 ", code:%d", pHead->version, code); - sem_wait(&tsSdbObj.sem); - return tsSdbObj.code; - } - return code; + if (pMsg != NULL) { + sdbDebug("app:%p:%p, is confirmed and will do callback func", pMsg->rpcMsg.ahandle, pMsg); + } + + if (pOper->cb != NULL) { + pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode); + } + + dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode); + taosFreeQitem(pOper); } void sdbUpdateSync() { SSyncCfg syncCfg = {0}; - int32_t index = 0; + int32_t index = 0; SDMMnodeInfos *mnodes = dnodeGetMnodeInfos(); for (int32_t i = 0; i < mnodes->nodeNum; ++i) { @@ -298,7 +305,7 @@ void sdbUpdateSync() { } syncCfg.replica = index; - syncCfg.quorum = (syncCfg.replica == 1) ? 1:2; + syncCfg.quorum = (syncCfg.replica == 1) ? 1 : 2; bool hasThisDnode = false; for (int32_t i = 0; i < syncCfg.replica; ++i) { @@ -325,10 +332,10 @@ void sdbUpdateSync() { syncInfo.getWalInfo = sdbGetWalInfo; syncInfo.getFileInfo = sdbGetFileInfo; syncInfo.writeToCache = sdbWriteToQueue; - syncInfo.confirmForward = sdbConfirmForward; + syncInfo.confirmForward = sdbConfirmForward; syncInfo.notifyRole = sdbNotifyRole; tsSdbObj.cfg = syncCfg; - + if (tsSdbObj.sync) { syncReconfig(tsSdbObj.sync, &syncCfg); } else { @@ -339,7 +346,6 @@ void sdbUpdateSync() { int32_t sdbInit() { pthread_mutex_init(&tsSdbObj.mutex, NULL); - sem_init(&tsSdbObj.sem, 0, 0); if (sdbInitWriteWorker() != 0) { return -1; @@ -367,7 +373,7 @@ void sdbCleanUp() { tsSdbObj.status = SDB_STATUS_CLOSING; sdbCleanupWriteWorker(); - sdbDebug("sdb will be closed, version:%" PRId64, tsSdbObj.version); + sdbDebug("sdb will be closed, ver:%" PRId64, tsSdbObj.version); if (tsSdbObj.sync) { syncStop(tsSdbObj.sync); @@ -379,7 +385,6 @@ void sdbCleanUp() { tsSdbObj.wal = NULL; } - sem_destroy(&tsSdbObj.sem); pthread_mutex_destroy(&tsSdbObj.mutex); } @@ -466,8 +471,8 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { atomic_add_fetch_32(&pTable->autoIndex, 1); } - sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion()); + sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, pOper->pMsg); (*pTable->insertFp)(pOper); return TSDB_CODE_SUCCESS; @@ -485,8 +490,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { taosHashRemove(pTable->iHandle, key, keySize); atomic_sub_fetch_32(&pTable->numOfRows, 1); - sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg); int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1; *updateEnd = 1; @@ -496,8 +501,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { } static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) { - sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 " version:%" PRIu64, pTable->tableName, - sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + 
sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg); (*pTable->updateFp)(pOper); return TSDB_CODE_SUCCESS; @@ -513,24 +518,22 @@ static int sdbWrite(void *param, void *data, int type) { assert(pTable != NULL); pthread_mutex_lock(&tsSdbObj.mutex); + if (pHead->version == 0) { - // assign version + // assign version tsSdbObj.version++; pHead->version = tsSdbObj.version; } else { // for data from WAL or forward, version may be smaller if (pHead->version <= tsSdbObj.version) { pthread_mutex_unlock(&tsSdbObj.mutex); - if (type == TAOS_QTYPE_FWD && tsSdbObj.sync != NULL) { - sdbDebug("forward request is received, version:%" PRIu64 " confirm it", pHead->version); - syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS); - } + sdbDebug("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64, + pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version); return TSDB_CODE_SUCCESS; } else if (pHead->version != tsSdbObj.version + 1) { pthread_mutex_unlock(&tsSdbObj.mutex); - sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64, - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, - tsSdbObj.version); + sdbError("table:%s, failed to restore %s record:%s from source(%d), ver:%" PRId64 " too large, sdb ver:%" PRId64, + pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version); return TSDB_CODE_MND_APP_ERROR; } else { tsSdbObj.version = pHead->version; @@ -542,28 +545,36 @@ static int sdbWrite(void *param, void *data, int type) { pthread_mutex_unlock(&tsSdbObj.mutex); return code; } - - code = sdbForwardToPeer(pHead); + pthread_mutex_unlock(&tsSdbObj.mutex); // from app, oper is created if (pOper != NULL) { - sdbTrace("record from app is disposed, table:%s action:%s record:%s version:%" PRIu64 " result:%s", - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, - tstrerror(code)); - return code; + // forward to peers + pOper->processedCount = 0; + int32_t syncCode = syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC); + if (syncCode <= 0) pOper->processedCount = 1; + + if (syncCode < 0) { + sdbError("table:%s, failed to forward request, result:%s action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, + tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + } else if (syncCode > 0) { + sdbDebug("table:%s, forward request is sent, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + } else { + sdbTrace("table:%s, no need to send fwd request, action:%s record:%s ver:%" PRId64 ", msg:%p", pTable->tableName, + sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, pOper->pMsg); + } + return syncCode; } + sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s ver:%" PRId64, pTable->tableName, + sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); + + // even it is WAL/FWD, it shall be called to update version in sync + syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC); + // from wal or forward msg, oper not 
created, should add into hash - if (tsSdbObj.sync != NULL) { - sdbTrace("record from wal forward is disposed, table:%s action:%s record:%s version:%" PRIu64 " confirm it", - pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); - syncConfirmForward(tsSdbObj.sync, pHead->version, code); - } else { - sdbTrace("record from wal restore is disposed, table:%s action:%s record:%s version:%" PRIu64, pTable->tableName, - sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version); - } - if (action == SDB_ACTION_INSERT) { SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable}; code = (*pTable->decodeFp)(&oper); @@ -627,7 +638,7 @@ int32_t sdbInsertRow(SSdbOper *pOper) { memcpy(pNewOper, pOper, sizeof(SSdbOper)); if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle, + sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); } @@ -677,7 +688,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) { memcpy(pNewOper, pOper, sizeof(SSdbOper)); if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle, + sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); } @@ -727,7 +738,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) { memcpy(pNewOper, pOper, sizeof(SSdbOper)); if (pNewOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle, + sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle, pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj)); } @@ -943,20 +954,24 @@ static void *sdbWorkerFp(void *param) { taosGetQitem(tsSdbWriteQall, &type, &item); if (type == TAOS_QTYPE_RPC) { pOper = (SSdbOper *)item; + pOper->processedCount = 1; pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK; + if (pOper->pMsg != NULL) { + sdbDebug("app:%p:%p, table:%s record:%p:%s ver:%" PRIu64 ", will be processed in sdb queue", + pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj, + sdbGetKeyStr(pOper->table, pHead->cont), pHead->version); + } } else { pHead = (SWalHead *)item; pOper = NULL; } - if (pOper != NULL && pOper->pMsg != NULL) { - sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue", - pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj, - sdbGetKeyStr(pOper->table, pHead->cont), pHead->version); - } - int32_t code = sdbWrite(pOper, pHead, type); - if (pOper) pOper->retCode = code; + if (code > 0) code = 0; + if (pOper) + pOper->retCode = code; + else + pHead->len = code; // hackway } walFsync(tsSdbObj.wal); @@ -965,25 +980,18 @@ static void *sdbWorkerFp(void *param) { taosResetQitems(tsSdbWriteQall); for (int32_t i = 0; i < numOfMsgs; ++i) { taosGetQitem(tsSdbWriteQall, &type, &item); + if (type == TAOS_QTYPE_RPC) { pOper = (SSdbOper *)item; - if (pOper != NULL && pOper->cb != NULL) { - sdbTrace("app:%p:%p, will do callback func, index:%d", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, i); - 
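/* In the rewritten worker loop, RPC items are completed through sdbConfirmForward(), while
   WAL/forward items carry their write result back through pHead->len (the "hackway" note) and
   are confirmed to the sync layer with syncConfirmForward() after walFsync(). */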
pOper->retCode = (*pOper->cb)(pOper->pMsg, pOper->retCode); - } - - if (pOper != NULL && pOper->pMsg != NULL) { - sdbTrace("app:%p:%p, msg is processed, result:%s", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, - tstrerror(pOper->retCode)); - } - - if (pOper != NULL) { - sdbDecRef(pOper->table, pOper->pObj); - } - - dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode); + sdbDecRef(pOper->table, pOper->pObj); + sdbConfirmForward(NULL, pOper, pOper->retCode); + } else if (type == TAOS_QTYPE_FWD) { + pHead = (SWalHead *)item; + syncConfirmForward(tsSdbObj.sync, pHead->version, pHead->len); + taosFreeQitem(item); + } else { + taosFreeQitem(item); } - taosFreeQitem(item); } } diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 97ffe83914..a56ad34a25 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -307,6 +307,12 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { code = TSDB_CODE_MND_INVALID_DB; goto connect_over; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + code = TSDB_CODE_MND_DB_IN_DROPPING; + goto connect_over; + } mnodeDecDbRef(pDb); } @@ -350,7 +356,12 @@ static int32_t mnodeProcessUseMsg(SMnodeMsg *pMsg) { int32_t code = TSDB_CODE_SUCCESS; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pUseDbMsg->db); if (pMsg->pDb == NULL) { - code = TSDB_CODE_MND_INVALID_DB; + return TSDB_CODE_MND_INVALID_DB; + } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; } return code; @@ -403,4 +414,4 @@ void mnodeVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capa memmove(data + pShow->offset[i] * rows, data + pShow->offset[i] * capacity, pShow->bytes[i] * rows); } } -} \ No newline at end of file +} diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 47add8f7a3..523d7001c2 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -116,6 +116,11 @@ static int32_t mnodeChildTableActionInsert(SSdbOper *pOper) { mError("ctable:%s, vgId:%d not in db:%s", pTable->info.tableId, pVgroup->vgId, pVgroup->dbName); return TSDB_CODE_MND_INVALID_DB; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } mnodeDecDbRef(pDb); SAcctObj *pAcct = mnodeGetAcct(pDb->acct); @@ -284,8 +289,8 @@ static int32_t mnodeChildTableActionRestored() { if (pTable == NULL) break; SDbObj *pDb = mnodeGetDbByTableId(pTable->info.tableId); - if (pDb == NULL) { - mError("ctable:%s, failed to get db, discard it", pTable->info.tableId); + if (pDb == NULL || pDb->status != TSDB_DB_STATUS_READY) { + mError("ctable:%s, failed to get db or db in dropping, discard it", pTable->info.tableId); SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; sdbDeleteRow(&desc); mnodeDecTableRef(pTable); @@ -376,6 +381,7 @@ static int32_t mnodeInitChildTables() { static void mnodeCleanupChildTables() { sdbCloseTable(tsChildTableSdb); + tsChildTableSdb = NULL; } static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) { @@ -422,7 +428,7 @@ static int32_t mnodeSuperTableActionDestroy(SSdbOper *pOper) { static int32_t mnodeSuperTableActionInsert(SSdbOper *pOper) { SSuperTableObj *pStable = pOper->pObj; SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId); - if (pDb != NULL) { + if (pDb != NULL && pDb->status 
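/* A database whose status is not TSDB_DB_STATUS_READY is in the middle of being dropped; the
   handlers that follow reject such requests with TSDB_CODE_MND_DB_IN_DROPPING instead of
   operating on it. */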
== TSDB_DB_STATUS_READY) { mnodeAddSuperTableIntoDb(pDb); } mnodeDecDbRef(pDb); @@ -554,6 +560,7 @@ static int32_t mnodeInitSuperTables() { static void mnodeCleanupSuperTables() { sdbCloseTable(tsSuperTableSdb); + tsSuperTableSdb = NULL; } int32_t mnodeInitTables() { @@ -683,10 +690,15 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreate->db); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { + if (pMsg->pDb == NULL) { mError("app:%p:%p, table:%s, failed to create, db not selected", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId); if (pMsg->pTable != NULL && pMsg->retry == 0) { @@ -717,10 +729,15 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { SCMDropTableMsg *pDrop = pMsg->rpcMsg.pCont; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableId); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { - mError("app:%p:%p, table:%s, failed to drop table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + if (pMsg->pDb == NULL) { + mError("app:%p:%p, table:%s, failed to drop table, db not selected or db in dropping", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { mError("app:%p:%p, table:%s, failed to drop table, in monitor database", pMsg->rpcMsg.ahandle, pMsg, @@ -755,11 +772,16 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { pInfo->tableId, pMsg->rpcMsg.handle, pInfo->createFlag); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableId); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { + if (pMsg->pDb == NULL) { mError("app:%p:%p, table:%s, failed to get table meta, db not selected", pMsg->rpcMsg.ahandle, pMsg, pInfo->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pInfo->tableId); if (pMsg->pTable == NULL) { @@ -783,9 +805,15 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) { static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) { SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable; - if (pTable != NULL) { - mLInfo("app:%p:%p, stable:%s, is created in sdb, result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, - tstrerror(code)); + assert(pTable); + + if (code == TSDB_CODE_SUCCESS) { + mLInfo("stable:%s, is created in sdb", pTable->info.tableId); + } else { + mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + tstrerror(code)); + SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb}; + sdbDeleteRow(&desc); } return code; @@ -1201,6 +1229,11 @@ static int32_t 
mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) { static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -1260,6 +1293,11 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } tstrncpy(prefix, pDb->name, 64); strcat(prefix, TS_PATH_DELIMITER); @@ -1561,10 +1599,16 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; assert(pTable); - mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); - - if (code != TSDB_CODE_SUCCESS) return code; + if (code == TSDB_CODE_SUCCESS) { + mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, + pTable->sid, pTable->uid); + } else { + mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg, + pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); + SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb}; + sdbDeleteRow(&desc); + return code; + } SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable); @@ -2285,7 +2329,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { if (pTable == NULL) continue; if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(tableId); - if (pMsg->pDb == NULL) { + if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { mnodeDecTableRef(pTable); continue; } @@ -2323,6 +2367,11 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -2371,6 +2420,11 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } int32_t numOfRows = 0; SChildTableObj *pTable = NULL; @@ -2462,10 +2516,15 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { pAlter->tableId, pMsg->rpcMsg.handle); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableId); - if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) { + if (pMsg->pDb == NULL) { mError("app:%p:%p, table:%s, failed to alter table, db not selected", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; 
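/*
 * A minimal sketch (hypothetical helper, not part of this patch): the same
 * "db must be in READY state" guard that this change adds throughout
 * mnodeShow.c, mnodeTable.c and mnodeVgroup.c could be factored into one
 * check. Only mnodeCheckDbReady() is invented here; every other name appears
 * in the surrounding diff.
 */
static int32_t mnodeCheckDbReady(SDbObj *pDb) {
  if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED;
  if (pDb->status != TSDB_DB_STATUS_READY) {
    mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
    return TSDB_CODE_MND_DB_IN_DROPPING;
  }
  return TSDB_CODE_SUCCESS;
}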
} + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { mError("app:%p:%p, table:%s, failed to alter table, its log db", pMsg->rpcMsg.ahandle, pMsg, pAlter->tableId); @@ -2525,6 +2584,11 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return TSDB_CODE_MND_DB_NOT_SELECTED; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -2572,7 +2636,11 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t rows, void *pConn) { SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; - + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } int32_t numOfRows = 0; SChildTableObj *pTable = NULL; diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c index fa2a2634d6..84f5d6aa58 100644 --- a/src/mnode/src/mnodeUser.c +++ b/src/mnode/src/mnodeUser.c @@ -154,6 +154,7 @@ int32_t mnodeInitUsers() { void mnodeCleanupUsers() { sdbCloseTable(tsUserSdb); + tsUserSdb = NULL; } SUserObj *mnodeGetUser(char *name) { diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 9a041aa4fd..966d4b0dd8 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -75,6 +75,11 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) { if (pDb == NULL) { return TSDB_CODE_MND_INVALID_DB; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } pVgroup->pDb = pDb; pVgroup->prev = NULL; @@ -165,10 +170,18 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) { } mnodeDecDnodeRef(pDnode); } + + free(pNew); } mnodeVgroupUpdateIdPool(pVgroup); + // reset vgid status on vgroup changed + mDebug("vgId:%d, reset sync status to unsynced", pVgroup->vgId); + for (int32_t v = 0; v < pVgroup->numOfVnodes; ++v) { + pVgroup->vnodeGid[v].role = TAOS_SYNC_ROLE_UNSYNCED; + } + mnodeDecVgroupRef(pVgroup); mDebug("vgId:%d, is updated, numOfVnode:%d", pVgroup->vgId, pVgroup->numOfVnodes); @@ -300,6 +313,7 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { + mTrace("dnode:%d, receive status from dnode, vgId:%d status is %d", pDnode->dnodeId, pVgroup->vgId, pVgid->role); pVgid->role = pVload->role; if (pVload->role == TAOS_SYNC_ROLE_MASTER) { pVgroup->inUse = i; @@ -339,20 +353,25 @@ void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) { } static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) { + SVgObj *pVgroup = pMsg->pVgroup; + SDbObj *pDb = pMsg->pDb; + assert(pVgroup); + if (code != TSDB_CODE_SUCCESS) { - pMsg->pVgroup = NULL; + mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + tstrerror(code)); + SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb}; + 
sdbDeleteRow(&desc); return code; } - SVgObj *pVgroup = pMsg->pVgroup; - SDbObj *pDb = pMsg->pDb; - - mInfo("vgId:%d, is created in mnode, db:%s replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes); + mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, + pDb->name, pVgroup->numOfVnodes); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { - mInfo("vgId:%d, index:%d, dnode:%d", pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId); + mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i, + pVgroup->vnodeGid[i].dnodeId); } - mnodeIncVgroupRef(pVgroup); pMsg->expected = pVgroup->numOfVnodes; mnodeSendCreateVgroupMsg(pVgroup, pMsg); @@ -373,6 +392,9 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) { return TSDB_CODE_MND_NO_ENOUGH_DNODES; } + pMsg->pVgroup = pVgroup; + mnodeIncVgroupRef(pVgroup); + SSdbOper oper = { .type = SDB_OPER_GLOBAL, .table = tsVgroupSdb, @@ -382,8 +404,6 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) { .cb = mnodeCreateVgroupCb }; - pMsg->pVgroup = pVgroup; - int32_t code = sdbInsertRow(&oper); if (code != TSDB_CODE_SUCCESS) { pMsg->pVgroup = NULL; @@ -412,6 +432,7 @@ void mnodeDropVgroup(SVgObj *pVgroup, void *ahandle) { void mnodeCleanupVgroups() { sdbCloseTable(tsVgroupSdb); + tsVgroupSdb = NULL; } int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { @@ -419,6 +440,11 @@ int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { if (pDb == NULL) { return TSDB_CODE_MND_DB_NOT_SELECTED; } + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -507,6 +533,11 @@ int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pC SDbObj *pDb = mnodeGetDb(pShow->db); if (pDb == NULL) return 0; + + if (pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pDb->name, pDb->status); + return 0; + } pVgroup = pDb->pHead; while (pVgroup != NULL) { @@ -681,9 +712,9 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (rpcMsg->ahandle == NULL) return; SMnodeMsg *mnodeMsg = rpcMsg->ahandle; - mnodeMsg->received++; + atomic_add_fetch_8(&mnodeMsg->received, 1); if (rpcMsg->code == TSDB_CODE_SUCCESS) { - mnodeMsg->successed++; + atomic_add_fetch_8(&mnodeMsg->successed, 1); } else { mnodeMsg->code = rpcMsg->code; } @@ -783,19 +814,20 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) { mDebug("dnode:%s, vgId:%d, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId); return TSDB_CODE_MND_VGROUP_NOT_EXIST; } - mnodeDecDnodeRef(pDnode); SVgObj *pVgroup = mnodeGetVgroup(pCfg->vgId); if (pVgroup == NULL) { mDebug("dnode:%s, vgId:%d, no vgroup info", taosIpStr(pCfg->dnodeId), pCfg->vgId); + mnodeDecDnodeRef(pDnode); return TSDB_CODE_MND_VGROUP_NOT_EXIST; } - mnodeDecVgroupRef(pVgroup); mDebug("vgId:%d, send create vnode msg to dnode %s for vnode cfg msg", pVgroup->vgId, pDnode->dnodeEp); SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp); mnodeSendCreateVnodeMsg(pVgroup, &ipSet, NULL); + mnodeDecDnodeRef(pDnode); + mnodeDecVgroupRef(pVgroup); return TSDB_CODE_SUCCESS; } diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 5d94e8456e..8ca1c2ff11 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -206,7 +206,7 @@ typedef struct HttpThread { 
pthread_mutex_t threadMutex; bool stop; int pollFd; - int numOfFds; + int numOfContexts; int threadId; char label[HTTP_LABEL_SIZE]; bool (*processData)(HttpContext *pContext); diff --git a/src/plugins/http/inc/httpLog.h b/src/plugins/http/inc/httpLog.h index 3712360a1c..f4c20a40d5 100644 --- a/src/plugins/http/inc/httpLog.h +++ b/src/plugins/http/inc/httpLog.h @@ -26,8 +26,6 @@ extern int32_t httpDebugFlag; #define httpInfo(...) { if (httpDebugFlag & DEBUG_INFO) { taosPrintLog("HTP INFO ", 255, __VA_ARGS__); }} #define httpDebug(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLog("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }} #define httpTrace(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLog("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }} - -#define httpDebugDump(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLongString("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }} -#define httpTraceDump(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }} +#define httpTraceL(...){ if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }} #endif diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index cdaee53c38..98fba9cb3b 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -44,7 +44,7 @@ static void httpDestroyContext(void *data) { HttpThread *pThread = pContext->pThread; httpRemoveContextFromEpoll(pContext); httpReleaseSession(pContext); - atomic_sub_fetch_32(&pThread->numOfFds, 1); + atomic_sub_fetch_32(&pThread->numOfContexts, 1); pContext->pThread = 0; pContext->state = HTTP_CONTEXT_STATE_CLOSED; @@ -171,38 +171,39 @@ bool httpInitContext(HttpContext *pContext) { void httpCloseContextByApp(HttpContext *pContext) { pContext->parsed = false; - bool keepAlive = true; + if (pContext->httpVersion == HTTP_VERSION_10 && pContext->httpKeepAlive != HTTP_KEEPALIVE_ENABLE) { keepAlive = false; } else if (pContext->httpVersion != HTTP_VERSION_10 && pContext->httpKeepAlive == HTTP_KEEPALIVE_DISABLE) { keepAlive = false; - } else {} + } else { + } if (keepAlive) { if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_HANDLING, HTTP_CONTEXT_STATE_READY)) { - httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse context", pContext, pContext->fd, + pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_CLOSED)) { httpRemoveContextFromEpoll(pContext); - httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", pContext, pContext->fd, + pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) { - httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse context", pContext, pContext->fd, + pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) { httpRemoveContextFromEpoll(pContext); - httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", - pContext, pContext->fd, 
pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", pContext, pContext->fd, + pContext->ipstr); } else { httpRemoveContextFromEpoll(pContext); - httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", - pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); + httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", pContext, pContext->fd, + pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); } } else { httpRemoveContextFromEpoll(pContext); - httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close connect", - pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); + httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close context", pContext, pContext->fd, + pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); } httpReleaseContext(pContext); @@ -214,7 +215,7 @@ void httpCloseContextByServer(HttpContext *pContext) { } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_DROPPING)) { httpDebug("context:%p, fd:%d, ip:%s, epoll already finished, wait app finished", pContext, pContext->fd, pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_CLOSED)) { - httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close context", pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close connect", pContext, pContext->fd, pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) { httpDebug("context:%p, fd:%d, ip:%s, epoll finished, will be closed soon", pContext, pContext->fd, pContext->ipstr); } else { diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c index 056fe425d4..2c94f61950 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) { return true; } - httpTraceDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd, - pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize, - pContext->parser.buffer); + httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfContexts:%d, read size:%d, raw data:\n%s", pContext, + pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfContexts, + pContext->parser.bufsize, pContext->parser.buffer); if (!httpGetHttpMethod(pContext)) { return false; diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index a5009c2347..dbe299cef7 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -108,7 +108,7 @@ bool httpReadDataImp(HttpContext *pContext) { static bool httpDecompressData(HttpContext *pContext) { if (pContext->contentEncoding != HTTP_COMPRESS_GZIP) { - httpTraceDump("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos); + httpTraceL("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos); return true; } @@ -124,8 +124,8 @@ static bool httpDecompressData(HttpContext *pContext) { if (ret == 0) { memcpy(pContext->parser.data.pos, decompressBuf, decompressBufLen); 
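/*
 * A brief sketch of the keep-alive decision made in httpCloseContextByApp()
 * (httpContext.c, above), assuming plain HTTP semantics and the macros
 * already used there; keepAliveFor() itself is illustrative only.
 * HTTP/1.0 closes by default unless the client explicitly enables keep-alive,
 * while later versions keep the connection unless it is explicitly disabled.
 */
static bool keepAliveFor(int httpVersion, int httpKeepAlive) {
  if (httpVersion == HTTP_VERSION_10) return httpKeepAlive == HTTP_KEEPALIVE_ENABLE;
  return httpKeepAlive != HTTP_KEEPALIVE_DISABLE;
}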
pContext->parser.data.pos[decompressBufLen] = 0; - httpTraceDump("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s", - pContext, pContext->fd, pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf); + httpTraceL("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s", pContext, pContext->fd, + pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf); pContext->parser.data.len = decompressBufLen; } else { httpError("context:%p, fd:%d, ip:%s, failed to decompress data, rawSize:%d, error:%d", @@ -293,7 +293,7 @@ static void *httpAcceptHttpConnection(void *arg) { totalFds = 1; for (int i = 0; i < pServer->numOfThreads; ++i) { - totalFds += pServer->pThreads[i].numOfFds; + totalFds += pServer->pThreads[i].numOfContexts; } if (totalFds > tsHttpCacheSessions * 100) { @@ -332,9 +332,9 @@ static void *httpAcceptHttpConnection(void *arg) { } // notify the data process, add into the FdObj list - atomic_add_fetch_32(&pThread->numOfFds, 1); - httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfFds:%d totalFds:%d, accept a new connection", pContext, connFd, - pContext->ipstr, pThread->label, pThread->numOfFds, totalFds); + atomic_add_fetch_32(&pThread->numOfContexts, 1); + httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfContexts:%d totalFds:%d, accept a new connection", pContext, + connFd, pContext->ipstr, pThread->label, pThread->numOfContexts, totalFds); // pick up next thread for next connection threadId++; diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 9d3efca01d..7a515d124e 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -166,8 +166,8 @@ void httpProcessMultiSql(HttpContext *pContext) { HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos; char *sql = httpGetCmdsString(pContext, cmd->sql); - httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, - pContext->ipstr, pContext->user, multiCmds->pos, sql); + httpTraceL("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, + pContext->ipstr, pContext->user, multiCmds->pos, sql); taosNotePrintHttp(sql); taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext); } @@ -233,10 +233,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num } } -void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) { +void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) { HttpContext *pContext = (HttpContext *)param; if (pContext == NULL) return; + int32_t code = taos_errno(result); HttpEncodeMethod *encode = pContext->encodeMethod; if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { @@ -260,8 +261,8 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) { return; } - int num_fields = taos_field_count(result); - if (num_fields == 0) { + bool isUpdate = tscIsUpdateQuery(result); + if (isUpdate) { // not select or show commands int affectRows = taos_affected_rows(result); @@ -306,8 +307,8 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) { return; } - httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr, - pContext->user, sql); + httpTraceL("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr, + pContext->user, sql); taosNotePrintHttp(sql); taos_query_a(pSession->taos, sql, 
httpProcessSingleSqlCallBack, (void *)pContext); } diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 11b701e0ea..36468b3fdb 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -35,9 +35,6 @@ #define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }} #define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }} -#define monitorDebugDump(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }} -#define monitorTraceDump(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLongString("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }} - #define SQL_LENGTH 1024 #define LOG_LEN_STR 100 #define IP_LEN_STR 18 diff --git a/src/plugins/mqtt/inc/mqttLog.h b/src/plugins/mqtt/inc/mqttLog.h index c0515c2c26..5d5f98a13b 100644 --- a/src/plugins/mqtt/inc/mqttLog.h +++ b/src/plugins/mqtt/inc/mqttLog.h @@ -27,7 +27,4 @@ extern int32_t mqttDebugFlag; #define mqttDebug(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLog("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }} #define mqttTrace(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLog("MQT TRACE ", mqttDebugFlag, __VA_ARGS__); }} -#define mqttDebugDump(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }} -#define mqttTraceDump(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }} - #endif diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 3aa1b60be5..6cd6edd6db 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -154,6 +154,7 @@ typedef struct SQuery { } SQuery; typedef struct SQueryRuntimeEnv { + jmp_buf env; SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo SQuery* pQuery; SQLFunctionCtx* pCtx; @@ -169,6 +170,9 @@ typedef struct SQueryRuntimeEnv { void* pSecQueryHandle; // another thread for bool stableQuery; // super table query or not bool topBotQuery; // false + bool groupbyNormalCol; // denote if this is a groupby normal column query + bool hasTagResults; // if there are tag values in final result or not + int32_t interBufSize; // intermediate buffer sizse int32_t prevGroupId; // previous executed group id SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file } SQueryRuntimeEnv; @@ -197,8 +201,10 @@ typedef struct SQInfo { */ int32_t tableIndex; int32_t numOfGroupResultPages; - _qinfo_free_fn_t freeFn; - jmp_buf env; + _qinfo_free_fn_t freeFn; //todo remove it + + void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables; + } SQInfo; #endif // TDENGINE_QUERYEXECUTOR_H diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 1ecca102ba..9b818b367f 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -15,6 +15,8 @@ #ifndef TDENGINE_QUERYUTIL_H #define TDENGINE_QUERYUTIL_H +int32_t getOutputInterResultBufSize(SQuery* pQuery); + void clearTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* pOneOutputRes); void copyTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* dst, const SWindowResult* src); @@ -35,7 +37,7 @@ SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot); #define curTimeWindow(_winres) ((_winres)->curIndex) bool isWindowResClosed(SWindowResInfo *pWindowResInfo, 
int32_t slot); -void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo); +void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize); char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult); diff --git a/src/query/inc/qast.h b/src/query/inc/qast.h index 410b2ac9d2..918604f8c9 100644 --- a/src/query/inc/qast.h +++ b/src/query/inc/qast.h @@ -16,16 +16,16 @@ #ifndef TDENGINE_TAST_H #define TDENGINE_TAST_H -#include #ifdef __cplusplus extern "C" { #endif -#include #include "os.h" #include "taosmsg.h" #include "taosdef.h" +#include "tskiplist.h" +#include "tbuffer.h" #include "tvariant.h" struct tExprNode; @@ -75,10 +75,6 @@ typedef struct tExprNode { }; } tExprNode; -void tSQLBinaryExprFromString(tExprNode **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len); - -void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len); - void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*)); void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param); @@ -86,12 +82,9 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, char *(*cb)(void *, const char*, int32_t)); -// todo refactor: remove it -void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res); - uint8_t getBinaryExprOptr(SSQLToken *pToken); -void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); +void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); tExprNode* exprTreeFromBinary(const void* data, size_t size); diff --git a/src/query/inc/qfill.h b/src/query/inc/qfill.h index da1cd8e5de..ee5974708a 100644 --- a/src/query/inc/qfill.h +++ b/src/query/inc/qfill.h @@ -60,8 +60,6 @@ typedef struct SPoint { void * val; } SPoint; -int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision); - SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol); diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 340f6bc4f3..63b7abb379 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -272,7 +272,7 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi bool stableQueryFunctChanged(int32_t funcId); void resetResultInfo(SResultInfo *pResInfo); -void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable); +void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf); static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = true; // the this struct has been initialized flag diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 55cb35fdf9..53a32a2356 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -18,18 +18,18 @@ #include "qfill.h" #include "taosmsg.h" +#include "exception.h" #include "hash.h" #include "qExecutor.h" #include "qUtil.h" -#include "qast.h" #include "qresultBuf.h" #include "query.h" #include "queryLog.h" +#include "qast.h" +#include "tfile.h" #include "tlosertree.h" -#include "exception.h" #include 
"tscompression.h" #include "ttime.h" -#include "tfile.h" /** * check if the primary column is load by default, otherwise, the program will @@ -49,6 +49,8 @@ #define GET_COL_DATA_POS(query, index, step) ((query)->pos + (index) * (step)) #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) +#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} + /* get the qinfo struct address from the query struct address */ #define GET_COLUMN_BYTES(query, colidx) \ ((query)->colList[(query)->pSelectExpr[colidx].base.colInfo.colIndex].bytes) @@ -100,6 +102,16 @@ static UNUSED_FUNC void *u_malloc (size_t __size) { } } +static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) { + uint32_t v = rand(); + if (v % 5 <= 1) { + return NULL; + } else { + return calloc(num, __size); + } +} + +#define calloc u_calloc #define malloc u_malloc #endif @@ -109,7 +121,15 @@ static UNUSED_FUNC void *u_malloc (size_t __size) { static void setQueryStatus(SQuery *pQuery, int8_t status); -static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; } +#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0) + +// previous time window may not be of the same size of pQuery->intervalTime +#define GET_NEXT_TIMEWINDOW(_q, tw) \ + do { \ + int32_t factor = GET_FORWARD_DIRECTION_FACTOR((_q)->order.order); \ + (tw)->skey += ((_q)->slidingTime * factor); \ + (tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \ + } while (0) // todo move to utility static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group); @@ -118,7 +138,6 @@ static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult * static void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult); static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInfo *pResultInfo); static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); -static void getNextTimeWindow(SQuery *pQuery, STimeWindow *pTimeWindow); static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, SDataStatis *pStatis, void *param, int32_t colIndex); @@ -314,6 +333,24 @@ static bool isTopBottomQuery(SQuery *pQuery) { return false; } +static bool hasTagValOutput(SQuery* pQuery) { + SExprInfo *pExprInfo = &pQuery->pSelectExpr[0]; + if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { + return true; + } else { // set tag value, by which the results are aggregated. 
+ for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { + SExprInfo *pLocalExprInfo = &pQuery->pSelectExpr[idx]; + + // ts_comp column required the tag value for join filter + if (TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) { + return true; + } + } + } + + return false; +} + static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) { // for a tag column, no corresponding field info SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo; @@ -368,34 +405,38 @@ static bool hasNullValue(SQuery *pQuery, int32_t col, int32_t numOfCols, SDataSt } static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, char *pData, - int16_t bytes) { + int16_t bytes, bool masterscan) { SQuery *pQuery = pRuntimeEnv->pQuery; int32_t *p1 = (int32_t *) taosHashGet(pWindowResInfo->hashList, pData, bytes); if (p1 != NULL) { pWindowResInfo->curIndex = *p1; - } else { // more than the capacity, reallocate the resources - if (pWindowResInfo->size >= pWindowResInfo->capacity) { - int64_t newCap = pWindowResInfo->capacity * 2; + } else { + if (masterscan) { // more than the capacity, reallocate the resources + if (pWindowResInfo->size >= pWindowResInfo->capacity) { + int64_t newCap = pWindowResInfo->capacity * 2; - char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); - if (t != NULL) { - pWindowResInfo->pResult = (SWindowResult *)t; - memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity); - } else { - // todo + char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); + if (t != NULL) { + pWindowResInfo->pResult = (SWindowResult *)t; + memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity); + } else { + // todo + } + + for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { + SPosInfo pos = {-1, -1}; + createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos, pRuntimeEnv->interBufSize); + } + pWindowResInfo->capacity = newCap; } - for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { - SPosInfo pos = {-1, -1}; - createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos); - } - pWindowResInfo->capacity = newCap; + // add a new result set for a new group + pWindowResInfo->curIndex = pWindowResInfo->size++; + taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); + } else { + return NULL; } - - // add a new result set for a new group - pWindowResInfo->curIndex = pWindowResInfo->size++; - taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); } return getWindowResult(pWindowResInfo, pWindowResInfo->curIndex); @@ -482,15 +523,19 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult } static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowResInfo, int32_t sid, - STimeWindow *win) { + STimeWindow *win, bool masterscan, bool* newWind) { assert(win->skey <= win->ekey); SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, TSDB_KEYSIZE); + SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, + TSDB_KEYSIZE, masterscan); if (pWindowRes == NULL) { - return -1; + *newWind 
= false; + + return masterscan? -1:0; } + *newWind = true; // not assign result buffer yet, add new result buffer if (pWindowRes->pos.pageId == -1) { int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, sid, pRuntimeEnv->numOfRowsPerPage); @@ -513,19 +558,29 @@ static SWindowStatus *getTimeWindowResStatus(SWindowResInfo *pWindowResInfo, int static int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos, int16_t order, int64_t *pData) { - int32_t endPos = searchFn((char *)pData, numOfRows, ekey, order); int32_t forwardStep = 0; - if (endPos >= 0) { - forwardStep = (order == TSDB_ORDER_ASC) ? (endPos - pos) : (pos - endPos); - assert(forwardStep >= 0); + if (order == TSDB_ORDER_ASC) { + int32_t end = searchFn((char*) &pData[pos], numOfRows - pos, ekey, order); + if (end >= 0) { + forwardStep = end; - // endPos data is equalled to the key so, we do need to read the element in endPos - if (pData[endPos] == ekey) { - forwardStep += 1; + if (pData[end + pos] == ekey) { + forwardStep += 1; + } + } + } else { + int32_t end = searchFn((char *)pData, pos + 1, ekey, order); + if (end >= 0) { + forwardStep = pos - end; + + if (pData[end] == ekey) { + forwardStep += 1; + } } } + assert(forwardStep > 0); return forwardStep; } @@ -534,7 +589,7 @@ static int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t sea */ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SWindowResInfo *pWindowResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!isIntervalQuery(pQuery))) { + if (pRuntimeEnv->scanFlag != MASTER_SCAN || (!QUERY_IS_INTERVAL_QUERY(pQuery))) { return pWindowResInfo->size; } @@ -648,7 +703,7 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo } } - assert(num >= 0); + assert(num > 0); return num; } @@ -673,7 +728,7 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStat if (forwardStep != numOfTotal) { pCtx[k].preAggVals.isSet = false; } - + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunction(&pCtx[k]); } @@ -698,59 +753,60 @@ static void doRowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SWindowStatus } } -static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pNextWin, - SDataBlockInfo *pDataBlockInfo, TSKEY *primaryKeys, - __block_search_fn_t searchFn) { +static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pNext, SDataBlockInfo *pDataBlockInfo, + TSKEY *primaryKeys, __block_search_fn_t searchFn, int32_t prevPosition) { SQuery *pQuery = pRuntimeEnv->pQuery; - // tumbling time window query, a special case of sliding time window query - if (pQuery->slidingTime == pQuery->intervalTime) { - // todo opt - } - - getNextTimeWindow(pQuery, pNextWin); + GET_NEXT_TIMEWINDOW(pQuery, pNext); // next time window is not in current block - if ((pNextWin->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (pNextWin->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + if ((pNext->skey > pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (pNext->ekey < pDataBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { return -1; } TSKEY startKey = -1; if (QUERY_IS_ASC_QUERY(pQuery)) { - startKey = pNextWin->skey; + startKey = pNext->skey; if (startKey < pQuery->window.skey) { startKey = pQuery->window.skey; } } else { - startKey = pNextWin->ekey; + startKey = 
pNext->ekey; if (startKey > pQuery->window.skey) { startKey = pQuery->window.skey; } } - int32_t startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order); + int32_t startPos = 0; + // tumbling time window query, a special case of sliding time window query + if (pQuery->slidingTime == pQuery->intervalTime && prevPosition != -1) { + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + startPos = prevPosition + factor; + } else { + startPos = searchFn((char *)primaryKeys, pDataBlockInfo->rows, startKey, pQuery->order.order); + } /* * This time window does not cover any data, try next time window, * this case may happen when the time window is too small */ - if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNextWin->ekey) { + if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) { TSKEY next = primaryKeys[startPos]; - pNextWin->ekey += ((next - pNextWin->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime; - pNextWin->skey = pNextWin->ekey - pQuery->intervalTime + 1; - } else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNextWin->skey) { + pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime; + pNext->skey = pNext->ekey - pQuery->intervalTime + 1; + } else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) { TSKEY next = primaryKeys[startPos]; - pNextWin->skey -= ((pNextWin->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime; - pNextWin->ekey = pNextWin->skey + pQuery->intervalTime - 1; + pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime; + pNext->ekey = pNext->skey + pQuery->intervalTime - 1; } return startPos; } -static TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) { +static FORCE_INLINE TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) { TSKEY ekey = -1; if (QUERY_IS_ASC_QUERY(pQuery)) { ekey = pWindow->ekey; @@ -787,8 +843,8 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas return NULL; } char *dataBlock = NULL; - SQuery *pQuery = pRuntimeEnv->pQuery; + SQuery *pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; int32_t functionId = pQuery->pSelectExpr[col].base.functionId; @@ -807,6 +863,10 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas sas->numOfCols = pQuery->numOfCols; sas->data = calloc(pQuery->numOfCols, POINTER_BYTES); + if (sas->data == NULL) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + // here the pQuery->colList and sas->colList are identical int32_t numOfCols = taosArrayGetSize(pDataBlock); for (int32_t i = 0; i < pQuery->numOfCols; ++i) { @@ -851,7 +911,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * SDataBlockInfo *pDataBlockInfo, SWindowResInfo *pWindowResInfo, __block_search_fn_t searchFn, SArray *pDataBlock) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; - + bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); + SQuery *pQuery = pRuntimeEnv->pQuery; TSKEY *tsCols = NULL; if (pDataBlock != NULL) { @@ -860,6 +921,9 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * } SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); + if (sasArray == NULL) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { 
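/*
 * A short, illustrative note on the error path introduced just above
 * (grounded in this patch; the setjmp() pairing shown is an assumption about
 * the query entry point, not code from this diff): calloc() failures such as
 * the sasArray allocation report TSDB_CODE_QRY_OUT_OF_MEMORY via
 * longjmp(pRuntimeEnv->env, ...) instead of threading an error code back
 * through every caller, so whichever function drives the scan is expected to
 * arm the jump target first, roughly:
 *
 *   int32_t code = setjmp(pRuntimeEnv->env);
 *   if (code != TSDB_CODE_SUCCESS) {
 *     return code;   // e.g. TSDB_CODE_QRY_OUT_OF_MEMORY from a failed calloc()
 *   }
 */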
char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); @@ -867,41 +931,52 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * } int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - if (isIntervalQuery(pQuery) && tsCols != NULL) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) && tsCols != NULL) { int32_t offset = GET_COL_DATA_POS(pQuery, 0, step); TSKEY ts = tsCols[offset]; + bool hasTimeWindow = false; STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery); - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win) != TSDB_CODE_SUCCESS) { + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { tfree(sasArray); return; } - TSKEY ekey = reviseWindowEkey(pQuery, &win); - int32_t forwardStep = - getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true); + int32_t forwardStep = 0; + int32_t startPos = pQuery->pos; - SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); - doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, pQuery->pos, forwardStep, tsCols, pDataBlockInfo->rows); + if (hasTimeWindow) { + TSKEY ekey = reviseWindowEkey(pQuery, &win); + forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true); + + SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); + doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &win, startPos, forwardStep, tsCols, pDataBlockInfo->rows); + } int32_t index = pWindowResInfo->curIndex; STimeWindow nextWin = win; while (1) { - int32_t startPos = getNextQualifiedWindow(pRuntimeEnv, &nextWin, pDataBlockInfo, tsCols, searchFn); + int32_t prevEndPos = (forwardStep - 1) * step + startPos; + startPos = getNextQualifiedWindow(pRuntimeEnv, &nextWin, pDataBlockInfo, tsCols, searchFn, prevEndPos); if (startPos < 0) { break; } // null data, failed to allocate more memory buffer - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin) != TSDB_CODE_SUCCESS) { + hasTimeWindow = false; + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { break; } - ekey = reviseWindowEkey(pQuery, &nextWin); + if (!hasTimeWindow) { + continue; + } + + TSKEY ekey = reviseWindowEkey(pQuery, &nextWin); forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, startPos, ekey, searchFn, true); - pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); + SWindowStatus* pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows); } @@ -951,7 +1026,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat } // assert(pRuntimeEnv->windowResInfo.hashList->size <= 2); - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes); + SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes, true); if (pWindowRes == NULL) { return -1; } @@ -1051,6 +1126,11 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { 
SResultInfo *pResInfo = GET_RES_INFO(pCtx); SQuery* pQuery = pRuntimeEnv->pQuery; + + // in case of timestamp column, always generated results. + if (functionId == TSDB_FUNC_TS) { + return true; + } if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { return false; @@ -1063,7 +1143,6 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx // todo add comments if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) { return pCtx->param[0].i64Key == pQuery->order.order; -// return !QUERY_IS_ASC_QUERY(pQuery); } // in the supplementary scan, only the following functions need to be executed @@ -1077,6 +1156,7 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pStatis, SDataBlockInfo *pDataBlockInfo, SWindowResInfo *pWindowResInfo, SArray *pDataBlock) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; STableQueryInfo* item = pQuery->current; @@ -1084,8 +1164,12 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SColumnInfoData* pColumnInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock, 0); TSKEY *tsCols = (pColumnInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? (TSKEY*) pColumnInfoData->pData:NULL; - bool groupbyColumnValue = isGroupbyNormalCol(pQuery->pGroupbyExpr); + bool groupbyColumnValue = pRuntimeEnv->groupbyNormalCol; + SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); + if (sasArray == NULL) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } int16_t type = 0; int16_t bytes = 0; @@ -1139,16 +1223,21 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } // interval window query - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { // decide the time window according to the primary timestamp int64_t ts = tsCols[offset]; STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery); - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win); + bool hasTimeWindow = false; + int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code continue; } + if (!hasTimeWindow) { + continue; + } + SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &win, offset); @@ -1156,7 +1245,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS int32_t index = pWindowResInfo->curIndex; while (1) { - getNextTimeWindow(pQuery, &nextWin); + GET_NEXT_TIMEWINDOW(pQuery, &nextWin); if (/*pWindowResInfo->startTime > nextWin.skey ||*/ (nextWin.skey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || (nextWin.skey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { @@ -1168,12 +1257,15 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } // null data, failed to allocate more memory buffer - if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin) != TSDB_CODE_SUCCESS) { + hasTimeWindow = false; + if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) { break; } - 
pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); - doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, offset); + if (hasTimeWindow) { + pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); + doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, offset); + } } pWindowResInfo->curIndex = index; @@ -1231,7 +1323,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl STableQueryInfo* pTableQInfo = pQuery->current; SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo; - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) { rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); } else { blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); @@ -1243,7 +1335,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl // interval query with limit applied int32_t numOfRes = 0; - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo); } else { numOfRes = getNumOfResult(pRuntimeEnv); @@ -1352,14 +1444,16 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY } // set the output buffer for the selectivity + tag query -static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) { +static void setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) { + SQuery* pQuery = pRuntimeEnv->pQuery; + if (isSelectivityWithTagsQuery(pQuery)) { int32_t num = 0; int16_t tagLen = 0; SQLFunctionCtx *p = NULL; SQLFunctionCtx **pTagCtx = calloc(pQuery->numOfOutput, POINTER_BYTES); - + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base; @@ -1386,11 +1480,13 @@ static void setCtxTagColumnInfo(SQuery *pQuery, SQLFunctionCtx *pCtx) { } } -static void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery) { +static FORCE_INLINE void setWindowResultInfo(SResultInfo *pResultInfo, SQuery *pQuery, bool isStableQuery, char* buf) { + char* p = buf; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE); - - setResultInfoBuf(&pResultInfo[i], pQuery->pSelectExpr[i].interBytes, isStableQuery); + int32_t size = pQuery->pSelectExpr[i].interBytes; + setResultInfoBuf(&pResultInfo[i], size, isStableQuery, p); + + p += size; } } @@ -1469,15 +1565,17 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order } } + char* buf = calloc(1, pRuntimeEnv->interBufSize); + // set the intermediate result output buffer - setWindowResultInfo(pRuntimeEnv->resultInfo, pQuery, pRuntimeEnv->stableQuery); + setWindowResultInfo(pRuntimeEnv->resultInfo, pQuery, pRuntimeEnv->stableQuery, buf); // if it is group by normal column, do not set output buffer, the output buffer is pResult if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !pRuntimeEnv->stableQuery) { resetCtxOutputBuf(pRuntimeEnv); } - setCtxTagColumnInfo(pQuery, pRuntimeEnv->pCtx); + setCtxTagColumnInfo(pRuntimeEnv, pRuntimeEnv->pCtx); return TSDB_CODE_SUCCESS; _clean: @@ -1508,9 +1606,9 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tVariantDestroy(&pCtx->tag); 
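/*
 * A short note on the intermediate-buffer change visible above and below
 * (derived from this patch; the layout picture is only illustrative):
 * setupQueryRuntimeEnv() now allocates a single calloc(1, pRuntimeEnv->interBufSize)
 * block and setWindowResultInfo() hands each output column a slice of it,
 *
 *   buf -> | col0 interBytes | col1 interBytes | ... | colN-1 interBytes |
 *
 * so only resultInfo[0].interResultBuf points at the start of an allocation,
 * which is why the teardown below frees index 0 once instead of freeing every
 * column's buffer inside the loop.
 */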
tfree(pCtx->tagInfo.pTagCtxList); - tfree(pRuntimeEnv->resultInfo[i].interResultBuf); } + tfree(pRuntimeEnv->resultInfo[0].interResultBuf); tfree(pRuntimeEnv->resultInfo); tfree(pRuntimeEnv->pCtx); } @@ -1637,29 +1735,21 @@ static bool onlyQueryTags(SQuery* pQuery) { ///////////////////////////////////////////////////////////////////////////////////////////// -void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *realWin, STimeWindow *win) { +void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) { assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime); + win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision); - win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision); - + /* + * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between + * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges. + */ if (keyFirst > (INT64_MAX - pQuery->intervalTime)) { - /* - * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between - * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges. - */ assert(keyLast - keyFirst < pQuery->intervalTime); - - realWin->skey = keyFirst; - realWin->ekey = keyLast; - win->ekey = INT64_MAX; return; + } else { + win->ekey = win->skey + pQuery->intervalTime - 1; } - - win->ekey = win->skey + pQuery->intervalTime - 1; - - realWin->skey = (win->skey < keyFirst)? keyFirst : win->skey; - realWin->ekey = (win->ekey < keyLast) ? win->ekey : keyLast; } static void setScanLimitationByResultBuffer(SQuery *pQuery) { @@ -1828,7 +1918,7 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) { if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { num = 128; - } else if (isIntervalQuery(pQuery)) { // time window query, allocate one page for each table + } else if (QUERY_IS_INTERVAL_QUERY(pQuery)) { // time window query, allocate one page for each table size_t s = pQInfo->tableqinfoGroupInfo.numOfTables; num = MAX(s, INITIAL_RESULT_ROWS_VALUE); } else { // for super table query, one page for each subset @@ -1952,14 +2042,6 @@ static bool needToLoadDataBlock(SQuery *pQuery, SDataStatis *pDataStatis, SQLFun return true; } -// previous time window may not be of the same size of pQuery->intervalTime -static void getNextTimeWindow(SQuery *pQuery, STimeWindow *pTimeWindow) { - int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - - pTimeWindow->skey += (pQuery->slidingTime * factor); - pTimeWindow->ekey = pTimeWindow->skey + (pQuery->intervalTime - 1); -} - SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis) { SQuery *pQuery = pRuntimeEnv->pQuery; @@ -1978,7 +2060,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, r |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pQuery->window.skey, pQuery->window.ekey, colId); } - if (pRuntimeEnv->pTSBuf > 0 || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) { r |= BLK_DATA_ALL_NEEDED; } } @@ -2117,7 +2199,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { // 
in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block SQuery* pQuery = pRuntimeEnv->pQuery; - if (!isIntervalQuery(pQuery) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFixedOutputQuery(pQuery)) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pQuery)) { SResultRec *pRec = &pQuery->rec; if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { @@ -2132,6 +2214,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB if (tmp == NULL) { // todo handle the oom assert(0); } else { + memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (newSize - pRec->rows) * bytes); pQuery->sdata[i] = (tFilePage *)tmp; } @@ -2162,34 +2245,32 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->order.order); TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; + + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { return 0; } - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); + tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); // todo extract methods - if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) { - STimeWindow realWin = TSWINDOW_INITIALIZER, w = TSWINDOW_INITIALIZER; + if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) { + STimeWindow w = TSWINDOW_INITIALIZER; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; if (QUERY_IS_ASC_QUERY(pQuery)) { - getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &w); pWindowResInfo->startTime = w.skey; pWindowResInfo->prevSKey = w.skey; } else { // the start position of the first time window in the endpoint that spreads beyond the queried last timestamp - getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &w); pWindowResInfo->startTime = pQuery->window.skey; pWindowResInfo->prevSKey = w.skey; } - - if (pRuntimeEnv->pFillInfo != NULL) { - pRuntimeEnv->pFillInfo->start = w.skey; - } } // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block @@ -2217,12 +2298,12 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { setQueryStatus(pQuery, QUERY_COMPLETED); } - if (isIntervalQuery(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; +// int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? 
QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; closeAllTimeWindow(&pRuntimeEnv->windowResInfo); - removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); +// removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window } else { assert(Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)); @@ -2638,7 +2719,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { tfree(pTableList); qError("QInfo:%p failed alloc memory", pQInfo); - longjmp(pQInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // todo opt for the case of one table per group @@ -2646,7 +2727,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { for (int32_t i = 0; i < size; ++i) { STableQueryInfo *item = taosArrayGetP(pGroup, i); - SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, tsdbGetTableId(item->pTable).tid); + SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid); if (list.size > 0 && item->windowResInfo.size > 0) { pTableList[numOfTables] = item; numOfTables += 1; @@ -2669,7 +2750,12 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); SResultInfo *pResultInfo = calloc(pQuery->numOfOutput, sizeof(SResultInfo)); - setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery); + if (pResultInfo == NULL) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + char* buf = calloc(1, pRuntimeEnv->interBufSize); + setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery, buf); resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo); int64_t lastTimestamp = -1; @@ -2755,11 +2841,9 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { tfree(pTree); pQInfo->offset = 0; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - tfree(pResultInfo[i].interResultBuf); - } tfree(pResultInfo); + tfree(buf); return pQInfo->numOfGroupResultPages; } @@ -2835,6 +2919,9 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * SWITCH_ORDER(pTableQueryInfo->cur.order); pTableQueryInfo->cur.vgroupIndex = -1; + + // set the index at the end of time window + pTableQueryInfo->windowResInfo.curIndex = pTableQueryInfo->windowResInfo.size - 1; } static void disableFuncInReverseScanImpl(SQInfo* pQInfo, SWindowResInfo *pWindowResInfo, int32_t order) { @@ -2869,7 +2956,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { // group by normal columns and interval query on normal table SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { disableFuncInReverseScanImpl(pQInfo, pWindowResInfo, order); } else { // for simple result of table query, for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor @@ -2909,14 +2996,16 @@ void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) { } } -void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo) { +void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize) { int32_t numOfCols = pQuery->numOfOutput; pResultRow->resultInfo = calloc((size_t)numOfCols, sizeof(SResultInfo)); 
pResultRow->pos = *posInfo; + char* buf = calloc(1, interBufSize); + // set the intermediate result output buffer - setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery); + setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery, buf); } void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { @@ -3044,7 +3133,7 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; bool toContinue = false; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { // for each group result, call the finalize function for each column SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; @@ -3236,10 +3325,10 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { + if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { // for each group result, call the finalize function for each column SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (pRuntimeEnv->groupbyNormalCol) { closeAllTimeWindow(pWindowResInfo); } @@ -3281,10 +3370,10 @@ static bool hasMainOutput(SQuery *pQuery) { return false; } -static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win) { - SQuery* pQuery = pRuntimeEnv->pQuery; +static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win, void* buf) { + SQuery *pQuery = pRuntimeEnv->pQuery; - STableQueryInfo *pTableQueryInfo = calloc(1, sizeof(STableQueryInfo)); + STableQueryInfo *pTableQueryInfo = buf; pTableQueryInfo->win = win; pTableQueryInfo->lastKey = win.skey; @@ -3292,15 +3381,14 @@ static STableQueryInfo *createTableQueryInfo( SQueryRuntimeEnv *pRuntimeEnv, voi pTableQueryInfo->pTable = pTable; pTableQueryInfo->cur.vgroupIndex = -1; - int32_t initialSize = 1; - int32_t initialThreshold = 1; - - if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - initialSize = 20; - initialThreshold = 100; + // set more initial size of interval/groupby query + if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { + int32_t initialSize = 16; + int32_t initialThreshold = 100; + initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); + } else { // in other aggregate query, do not initialize the windowResInfo } - initWindowResInfo(&pTableQueryInfo->windowResInfo, pRuntimeEnv, initialSize, initialThreshold, TSDB_DATA_TYPE_INT); return pTableQueryInfo; } @@ -3310,7 +3398,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols) } cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols); - free(pTableQueryInfo); } #define SET_CURRENT_QUERY_TABLE_INFO(_runtime, _tableInfo) \ @@ -3323,7 +3410,6 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols) /** * set output buffer for different group - * TODO opt performance if current group is identical to previous group * @param pRuntimeEnv * @param pDataBlockInfo */ @@ -3334,14 +3420,18 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { // lastKey needs to be updated pTableQueryInfo->lastKey = nextKey; - setAdditionalInfo(pQInfo, 
pTableQueryInfo->pTable, pTableQueryInfo); + + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo); + } if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) { return; } int32_t GROUPRESULTID = 1; - SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, sizeof(groupIndex)); + SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&groupIndex, + sizeof(groupIndex), true); if (pWindowRes == NULL) { return; } @@ -3474,12 +3564,12 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { * In ascending query, key is the first qualified timestamp. However, in the descending order query, additional * operations involve. */ - STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER; + STimeWindow w = TSWINDOW_INITIALIZER; SWindowResInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo; TSKEY sk = MIN(win.skey, win.ekey); TSKEY ek = MAX(win.skey, win.ekey); - getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &realWin, &w); + getAlignQueryTimeWindow(pQuery, win.skey, sk, ek, &w); pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { @@ -3519,20 +3609,7 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo) { return loadPrimaryTS; } -static int32_t getNumOfSubset(SQInfo *pQInfo) { - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - int32_t totalSubset = 0; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (isIntervalQuery(pQuery))) { - totalSubset = numOfClosedTimeWindow(&pQInfo->runtimeEnv.windowResInfo); - } else { - totalSubset = GET_NUM_OF_TABLEGROUP(pQInfo); - } - - return totalSubset; -} - -static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResult *result, int32_t orderType) { +static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo, int32_t orderType) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; @@ -3541,17 +3618,18 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResult *result, int32_t orde int32_t step = -1; qDebug("QInfo:%p start to copy data from windowResInfo to query buf", pQInfo); - int32_t totalSubset = getNumOfSubset(pQInfo); + int32_t totalSet = numOfClosedTimeWindow(pResultInfo); + SWindowResult* result = pResultInfo->pResult; if (orderType == TSDB_ORDER_ASC) { startIdx = pQInfo->groupIndex; step = 1; } else { // desc order copy all data - startIdx = totalSubset - pQInfo->groupIndex - 1; + startIdx = totalSet - pQInfo->groupIndex - 1; step = -1; } - for (int32_t i = startIdx; (i < totalSubset) && (i >= 0); i += step) { + for (int32_t i = startIdx; (i < totalSet) && (i >= 0); i += step) { if (result[i].numOfRows == 0) { pQInfo->offset = 0; pQInfo->groupIndex += 1; @@ -3606,22 +3684,22 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResult *result, int32_t orde * @param pQInfo * @param result */ -void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResult *result) { +void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? 
pQuery->pGroupbyExpr->orderType : TSDB_ORDER_ASC; - int32_t numOfResult = doCopyToSData(pQInfo, result, orderType); + int32_t numOfResult = doCopyToSData(pQInfo, pResultInfo, orderType); pQuery->rec.rows += numOfResult; assert(pQuery->rec.rows <= pQuery->rec.capacity); } -static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) { +static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; // update the number of result for each, only update the number of rows for the corresponding window result. - if (pQuery->intervalTime == 0) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i]; @@ -3635,14 +3713,6 @@ static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, S pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes); } } - -// int32_t g = pTableQueryInfo->groupIndex; -// assert(pRuntimeEnv->windowResInfo.size > 0); -// -// SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g)); -// if (pWindowRes->numOfRows == 0) { -// pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv); -// } } } @@ -3654,7 +3724,7 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo * SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo; pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1; - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) { rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); } else { blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); @@ -3696,7 +3766,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { } else { // there are results waiting for returned to client. if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && - (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) && + (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) && (pRuntimeEnv->windowResInfo.size > 0)) { return true; } @@ -3819,7 +3889,6 @@ static void queryCostStatis(SQInfo *pQInfo) { // double total = pSummary->fileTimeUs + pSummary->cacheTimeUs; // double io = pSummary->loadCompInfoUs + pSummary->loadBlocksUs + pSummary->loadFieldUs; - // todo add the intermediate result save cost!! 
// double computing = total - io; // // qDebug( @@ -3879,12 +3948,13 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { STableQueryInfo* pTableQueryInfo = pQuery->current; TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle; + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { return; } - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); + tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); if (pQuery->limit.offset > blockInfo.rows) { pQuery->limit.offset -= blockInfo.rows; @@ -3916,22 +3986,23 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { */ assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL); - STimeWindow w = TSWINDOW_INITIALIZER, realWin = TSWINDOW_INITIALIZER; + STimeWindow w = TSWINDOW_INITIALIZER; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; STableQueryInfo *pTableQueryInfo = pQuery->current; + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle); + tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo); if (QUERY_IS_ASC_QUERY(pQuery)) { if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { - getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &w); pWindowResInfo->startTime = w.skey; pWindowResInfo->prevSKey = w.skey; } } else { - getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &realWin, &w); + getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &w); pWindowResInfo->startTime = pQuery->window.skey; pWindowResInfo->prevSKey = w.skey; @@ -3948,7 +4019,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { } STimeWindow tw = win; - getNextTimeWindow(pQuery, &tw); + GET_NEXT_TIMEWINDOW(pQuery, &tw); if (pQuery->limit.offset == 0) { if ((tw.skey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || @@ -3960,7 +4031,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { tw = win; int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey); + getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); assert(startPos >= 0); // set the abort info @@ -4003,7 +4074,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { tw = win; int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey); + getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); assert(startPos >= 0); // set the abort info @@ -4028,7 +4099,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { return; } - if (isSTableQuery && (!isIntervalQuery(pQuery)) && (!isFixedOutputQuery(pQuery))) { + if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pQuery))) { return; } @@ -4042,7 +4113,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { if (!isSTableQuery && (pQInfo->tableqinfoGroupInfo.numOfTables == 1) && (cond.order == TSDB_ORDER_ASC) - && (!isIntervalQuery(pQuery)) + && 
(!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) && (!isFixedOutputQuery(pQuery)) ) { @@ -4101,6 +4172,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo pRuntimeEnv->cur.vgroupIndex = -1; pRuntimeEnv->stableQuery = isSTableQuery; pRuntimeEnv->prevGroupId = INT32_MIN; + pRuntimeEnv->groupbyNormalCol = isGroupbyNormalCol(pQuery->pGroupbyExpr); if (pTsBuf != NULL) { int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; @@ -4125,16 +4197,16 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo if (pQuery->intervalTime == 0) { int16_t type = TSDB_DATA_TYPE_NULL; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group by columns not tags; + if (pRuntimeEnv->groupbyNormalCol) { // group by columns not tags; type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); } else { type = TSDB_DATA_TYPE_INT; // group id } - initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 512, 4096, type); + initWindowResInfo(&pRuntimeEnv->windowResInfo, pRuntimeEnv, 32, 4096, type); } - } else if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || isIntervalQuery(pQuery)) { + } else if (pRuntimeEnv->groupbyNormalCol || QUERY_IS_INTERVAL_QUERY(pQuery)) { int32_t rows = getInitialPageNum(pQInfo); code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo); if (code != TSDB_CODE_SUCCESS) { @@ -4142,7 +4214,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo } int16_t type = TSDB_DATA_TYPE_NULL; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (pRuntimeEnv->groupbyNormalCol) { type = getGroupbyColumnType(pQuery, pQuery->pGroupbyExpr); } else { type = TSDB_DATA_TYPE_TIMESTAMP; @@ -4153,15 +4225,22 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { SFillColInfo* pColInfo = taosCreateFillColInfo(pQuery); - pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, 0, 0, pQuery->rec.capacity, pQuery->numOfOutput, + STimeWindow w = TSWINDOW_INITIALIZER; + + TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey); + TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey); + getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w); + + pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, pQuery->rec.capacity, pQuery->numOfOutput, pQuery->slidingTime, pQuery->slidingTimeUnit, pQuery->precision, pQuery->fillType, pColInfo); } // todo refactor pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); - setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery); + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); return TSDB_CODE_SUCCESS; } @@ -4184,13 +4263,15 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { int64_t st = taosGetTimestampMs(); TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? 
pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; + SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; + while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (isQueryKilled(pQInfo)) { break; } - SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pQueryHandle); + tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); STableQueryInfo **pTableQueryInfo = (STableQueryInfo**) taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid)); if(pTableQueryInfo == NULL) { break; @@ -4202,14 +4283,17 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { SDataStatis *pStatis = NULL; SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); - if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - if (!isIntervalQuery(pQuery)) { + if (!pRuntimeEnv->groupbyNormalCol) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1; setExecutionContext(pQInfo, (*pTableQueryInfo)->groupIndex, blockInfo.window.ekey + step); } else { // interval query TSKEY nextKey = blockInfo.window.skey; setIntervalQueryRange(pQInfo, nextKey); - /*int32_t ret = */setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo); + + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + setAdditionalInfo(pQInfo, (*pTableQueryInfo)->pTable, *pTableQueryInfo); + } } } @@ -4234,9 +4318,9 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); - STableId id = tsdbGetTableId(pCheckInfo->pTable); + STableId* id = TSDB_TABLEID(pCheckInfo->pTable); qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index, - id.uid, id.tid, pCheckInfo->lastKey, pCheckInfo->win.ekey); + id->uid, id->tid, pCheckInfo->lastKey, pCheckInfo->win.ekey); STsdbQueryCond cond = { .twindow = {pCheckInfo->lastKey, pCheckInfo->win.ekey}, @@ -4365,7 +4449,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { break; } } - } else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group-by on normal columns query + } else if (pRuntimeEnv->groupbyNormalCol) { // group-by on normal columns query while (pQInfo->groupIndex < numOfGroups) { SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex); @@ -4391,6 +4475,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo); + taosArrayDestroy(g1); + taosArrayDestroy(tx); SArray* s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle); assert(taosArrayGetSize(s) >= 1); @@ -4427,7 +4513,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pQInfo->groupIndex = 0; ensureOutputBufferSimple(pRuntimeEnv, pWindowResInfo->size); - copyFromWindowResToSData(pQInfo, pWindowResInfo->pResult); + copyFromWindowResToSData(pQInfo, pWindowResInfo); pQInfo->groupIndex = currentGroupIndex; //restore the group index assert(pQuery->rec.rows == pWindowResInfo->size); @@ -4442,7 +4528,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { * we need to return it to client in the first place. 
*/ if (pQInfo->groupIndex > 0) { - copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); pQuery->rec.total += pQuery->rec.rows; if (pQuery->rec.rows > 0) { @@ -4503,11 +4589,11 @@ static void sequentialTableProcess(SQInfo *pQInfo) { */ pQInfo->tableIndex++; - STableIdInfo tidInfo; - STableId id = tsdbGetTableId(pQuery->current->pTable); + STableIdInfo tidInfo = {0}; - tidInfo.uid = id.uid; - tidInfo.tid = id.tid; + STableId* id = TSDB_TABLEID(pQuery->current->pTable); + tidInfo.uid = id->uid; + tidInfo.tid = id->tid; tidInfo.key = pQuery->current->lastKey; taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo); @@ -4609,7 +4695,9 @@ static void doRestoreContext(SQInfo *pQInfo) { static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - if (isIntervalQuery(pQuery)) { +// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo); for (int32_t i = 0; i < numOfGroup; ++i) { SArray *group = GET_TABLEGROUP(pQInfo, i); @@ -4618,6 +4706,7 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) { for (int32_t j = 0; j < num; ++j) { STableQueryInfo* item = taosArrayGetP(group, j); closeAllTimeWindow(&item->windowResInfo); +// removeRedundantWindow(&item->windowResInfo, item->lastKey - step, step); } } } else { // close results for group result @@ -4634,13 +4723,13 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { * if the groupIndex > 0, the query process must be completed yet, we only need to * copy the data into output buffer */ - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { copyResToQueryResultBuf(pQInfo, pQuery); #ifdef _DEBUG_VIEW displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num); #endif } else { - copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); } qDebug("QInfo:%p current:%"PRId64", total:%"PRId64"", pQInfo, pQuery->rec.rows, pQuery->rec.total); @@ -4669,6 +4758,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { el = scanMultiTableDataBlocks(pQInfo); qDebug("QInfo:%p reversed scan completed, elapsed time: %" PRId64 "ms", pQInfo, el); +// doCloseAllTimeWindowAfterScan(pQInfo); doRestoreContext(pQInfo); } else { qDebug("QInfo:%p no need to do reversed scan, query completed", pQInfo); @@ -4681,7 +4771,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { return; } - if (isIntervalQuery(pQuery) || isSumAvgRateQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) { if (mergeIntoGroupResult(pQInfo) == TSDB_CODE_SUCCESS) { copyResToQueryResultBuf(pQInfo, pQuery); @@ -4690,7 +4780,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { #endif } } else { // not a interval query - copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); } // handle the limitation of output buffer @@ -4778,10 +4868,10 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) pQuery->current->lastKey, pQuery->window.ekey); } else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { STableIdInfo tidInfo; - STableId id = tsdbGetTableId(pQuery->current); + STableId* id = TSDB_TABLEID(pQuery->current); - tidInfo.uid = id.uid; - tidInfo.tid = id.tid; + tidInfo.uid = id->uid; + tidInfo.tid = id->tid; tidInfo.key = 
pQuery->current->lastKey; taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo); } @@ -4842,10 +4932,10 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { while (1) { tableIntervalProcessImpl(pRuntimeEnv, newStartKey); - if (isIntervalQuery(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { pQInfo->groupIndex = 0; // always start from 0 pQuery->rec.rows = 0; - copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); } @@ -4871,10 +4961,10 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { } // all data scanned, the group by normal column can return - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // todo refactor with merge interval time result + if (pRuntimeEnv->groupbyNormalCol) { // todo refactor with merge interval time result pQInfo->groupIndex = 0; pQuery->rec.rows = 0; - copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); } @@ -4906,7 +4996,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { pQInfo->groupIndex = 0; // always start from 0 if (pRuntimeEnv->windowResInfo.size > 0) { - copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult); + copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); if (pQuery->rec.rows > 0) { @@ -4932,7 +5022,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { STableQueryInfo* item = taosArrayGetP(g, 0); // group by normal column, sliding window query, interval query are handled by interval query processor - if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation) + if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { // interval (down sampling operation) tableIntervalProcess(pQInfo, item); } else if (isFixedOutputQuery(pQuery)) { tableFixedOutputProcess(pQInfo, item); @@ -4947,18 +5037,19 @@ static void tableQueryImpl(SQInfo *pQInfo) { } static void stableQueryImpl(SQInfo *pQInfo) { - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->rec.rows = 0; int64_t st = taosGetTimestampUs(); - if (isIntervalQuery(pQuery) || - (isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) && + if (QUERY_IS_INTERVAL_QUERY(pQuery) || + (isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol && !isFirstLastRowQuery(pQuery))) { multiTableQueryProcess(pQInfo); } else { assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) || - isFirstLastRowQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)); + isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol); sequentialTableProcess(pQInfo); } @@ -5653,28 +5744,33 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, STimeWindow window = pQueryMsg->window; taosArraySort(pTableIdList, compareTableIdInfo); + pQInfo->runtimeEnv.interBufSize = getOutputInterResultBufSize(pQuery); + pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo)); + int32_t index = 0; + for(int32_t i = 0; i < numOfGroups; ++i) { SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i); - size_t s = 
taosArrayGetSize(pa); + size_t s = taosArrayGetSize(pa); SArray* p1 = taosArrayInit(s, POINTER_BYTES); for(int32_t j = 0; j < s; ++j) { void* pTable = taosArrayGetP(pa, j); + STableId* id = TSDB_TABLEID(pTable); - // NOTE: compare STableIdInfo with STableId - STableId id = tsdbGetTableId(pTable); - STableIdInfo* pTableId = taosArraySearch(pTableIdList, &id, compareTableIdInfo); + STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo); if (pTableId != NULL ) { window.skey = pTableId->key; } else { window.skey = pQueryMsg->window.skey; } - STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window); + void* buf = pQInfo->pBuf + index * sizeof(STableQueryInfo); + STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf); item->groupIndex = i; taosArrayPush(p1, &item); - taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES); + taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES); + index += 1; } taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1); @@ -5734,7 +5830,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ qDebug("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->window.skey, pQuery->window.ekey, pQuery->order.order); setQueryStatus(pQuery, QUERY_COMPLETED); - + pQInfo->tableqinfoGroupInfo.numOfTables = 0; sem_post(&pQInfo->dataReady); return TSDB_CODE_SUCCESS; } @@ -5763,6 +5859,18 @@ _error: return code; } +static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { + if (pFilter == NULL) { + return; + } + for (int32_t i = 0; i < numOfFilters; i++) { + if (pFilter[i].filterstr) { + free((void*)(pFilter[i].pz)); + } + } + free(pFilter); +} + static void freeQInfo(SQInfo *pQInfo) { if (!isValidQInfo(pQInfo)) { return; @@ -5818,6 +5926,7 @@ static void freeQInfo(SQInfo *pQInfo) { taosArrayDestroy(p); } + tfree(pQInfo->pBuf); taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList); taosHashCleanup(pQInfo->tableqinfoGroupInfo.map); tsdbDestoryTableGroup(&pQInfo->tableGroupInfo); @@ -5830,7 +5939,15 @@ static void freeQInfo(SQInfo *pQInfo) { tfree(pQuery->tagColList); tfree(pQuery->pFilterInfo); - tfree(pQuery->colList); + + if (pQuery->colList != NULL) { + for (int32_t i = 0; i < pQuery->numOfCols; i++) { + SColumnInfo* column = pQuery->colList + i; + freeColumnFilterInfo(column->filters, column->numOfFilters); + } + tfree(pQuery->colList); + } + tfree(pQuery->sdata); tfree(pQuery); @@ -6027,6 +6144,11 @@ _over: free(pExprMsg); taosArrayDestroy(pTableIdList); + for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { + SColumnInfo* column = pQueryMsg->colList + i; + freeColumnFilterInfo(column->filters, column->numOfFilters); + } + //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; if (code != TSDB_CODE_SUCCESS) { *pQInfo = NULL; @@ -6094,7 +6216,8 @@ void qTableQuery(qinfo_t qinfo) { return; } - int32_t ret = setjmp(pQInfo->env); + int32_t ret = setjmp(pQInfo->runtimeEnv.env); + // error occurs, record the error code and return to client if (ret != TSDB_CODE_SUCCESS) { pQInfo->code = ret; @@ -6277,13 +6400,13 @@ static void buildTagQueryResult(SQInfo* pQInfo) { varDataSetLen(output, rsize - VARSTR_HEADER_SIZE); output = varDataVal(output); - STableId id = tsdbGetTableId(item->pTable); + STableId* id = TSDB_TABLEID(item->pTable); - *(int64_t *)output = id.uid; // memory align problem, todo serialize - output += 
sizeof(id.uid); + *(int64_t *)output = id->uid; // memory align problem, todo serialize + output += sizeof(id->uid); - *(int32_t *)output = id.tid; - output += sizeof(id.tid); + *(int32_t *)output = id->tid; + output += sizeof(id->tid); *(int32_t *)output = pQInfo->vgId; output += sizeof(pQInfo->vgId); @@ -6404,7 +6527,7 @@ void qSetQueryMgmtClosed(void* pQMgmt) { pQueryMgmt->closed = true; pthread_mutex_unlock(&pQueryMgmt->lock); - taosCacheEmpty(pQueryMgmt->qinfoPool, true); + taosCacheRefresh(pQueryMgmt->qinfoPool, freeqinfoFn); } void qCleanupQueryMgmt(void* pQMgmt) { @@ -6427,11 +6550,13 @@ void qCleanupQueryMgmt(void* pQMgmt) { qDebug("vgId:%d querymgmt cleanup completed", vgId); } -void** qRegisterQInfo(void* pMgmt, void* qInfo) { +void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { if (pMgmt == NULL) { return NULL; } + const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2; + SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { return NULL; @@ -6443,21 +6568,23 @@ void** qRegisterQInfo(void* pMgmt, void* qInfo) { return NULL; } else { - void** handle = taosCachePut(pQueryMgmt->qinfoPool, qInfo, POINTER_BYTES, &qInfo, POINTER_BYTES, tsShellActivityTimer*2); + uint64_t handleVal = (uint64_t) qInfo; + + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(int64_t), &qInfo, POINTER_BYTES, DEFAULT_QHANDLE_LIFE_SPAN); pthread_mutex_unlock(&pQueryMgmt->lock); return handle; } } -void** qAcquireQInfo(void* pMgmt, void** key) { +void** qAcquireQInfo(void* pMgmt, uint64_t key) { SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) { return NULL; } - void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, key, POINTER_BYTES); + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(uint64_t)); if (handle == NULL || *handle == NULL) { return NULL; } else { diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index d7725f561d..dce2c24ea0 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -17,22 +17,30 @@ #include "hash.h" #include "taosmsg.h" -#include "qextbuffer.h" -#include "ttime.h" - -#include "qfill.h" #include "ttime.h" #include "qExecutor.h" #include "qUtil.h" +int32_t getOutputInterResultBufSize(SQuery* pQuery) { + int32_t size = 0; + + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE); + size += pQuery->pSelectExpr[i].interBytes; + } + + assert(size > 0); + + return size; +} + int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t size, int32_t threshold, int16_t type) { pWindowResInfo->capacity = size; pWindowResInfo->threshold = threshold; pWindowResInfo->type = type; - _hash_fn_t fn = taosGetDefaultHashFunction(type); pWindowResInfo->hashList = taosHashInit(threshold, fn, false); @@ -44,7 +52,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult)); for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { SPosInfo posInfo = {-1, -1}; - createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo); + createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize); } return TSDB_CODE_SUCCESS; @@ -54,11 +62,8 @@ void destroyTimeWindowRes(SWindowResult *pWindowRes, int32_t nOutputCols) { if (pWindowRes == NULL) { return; } - - for 
(int32_t i = 0; i < nOutputCols; ++i) { - free(pWindowRes->resultInfo[i].interResultBuf); - } - + + free(pWindowRes->resultInfo[0].interResultBuf); free(pWindowRes->resultInfo); } @@ -180,19 +185,33 @@ void closeAllTimeWindow(SWindowResInfo *pWindowResInfo) { /* * remove the results that are not the FIRST time window that spreads beyond the - * the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time + * the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time. + * NOTE: remove redundant, only when the result set order equals to traverse order */ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order) { assert(pWindowResInfo->size >= 0 && pWindowResInfo->capacity >= pWindowResInfo->size); - - int32_t i = 0; - while (i < pWindowResInfo->size && - ((pWindowResInfo->pResult[i].window.ekey < lastKey && order == QUERY_ASC_FORWARD_STEP) || - (pWindowResInfo->pResult[i].window.skey > lastKey && order == QUERY_DESC_FORWARD_STEP))) { - ++i; + if (pWindowResInfo->size <= 1) { + return; } - - // assert(i < pWindowResInfo->size); + + // get the result order + int32_t resultOrder = (pWindowResInfo->pResult[0].window.skey < pWindowResInfo->pResult[1].window.skey)? 1:-1; + + if (order != resultOrder) { + return; + } + + int32_t i = 0; + if (order == QUERY_ASC_FORWARD_STEP) { + while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.ekey < lastKey)) { + ++i; + } + } else if (order == QUERY_DESC_FORWARD_STEP) { + while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.skey > lastKey)) { + ++i; + } + } + if (i < pWindowResInfo->size) { pWindowResInfo->size = (i + 1); } @@ -227,10 +246,9 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow } pWindowRes->numOfRows = 0; - // pWindowRes->nAlloc = 0; pWindowRes->pos = (SPosInfo){-1, -1}; pWindowRes->status.closed = false; - pWindowRes->window = (STimeWindow){0, 0}; + pWindowRes->window = TSWINDOW_INITIALIZER; } /** @@ -240,7 +258,6 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow */ void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, const SWindowResult *src) { dst->numOfRows = src->numOfRows; - // dst->nAlloc = src->nAlloc; dst->window = src->window; dst->status = src->status; diff --git a/src/query/src/qast.c b/src/query/src/qast.c index 721cd8ae5a..ffd339f111 100644 --- a/src/query/src/qast.c +++ b/src/query/src/qast.c @@ -13,25 +13,26 @@ * along with this program. If not, see . 
*/ + #include "os.h" -#include "tulog.h" -#include "tutil.h" -#include "tbuffer.h" + +#include "tname.h" #include "qast.h" -#include "tcompare.h" +#include "tsdb.h" +#include "exception.h" #include "qsqlparser.h" #include "qsyntaxtreefunction.h" #include "taosdef.h" #include "taosmsg.h" +#include "tarray.h" +#include "tbuffer.h" +#include "tcompare.h" +#include "tskiplist.h" #include "tsqlfunction.h" #include "tstoken.h" #include "ttokendef.h" -#include "tschemautil.h" -#include "tarray.h" -#include "tskiplist.h" -#include "queryLog.h" -#include "tsdbMain.h" -#include "exception.h" +#include "tulog.h" +#include "tutil.h" /* * @@ -327,104 +328,6 @@ static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *st } } -void tSQLBinaryExprFromString(tExprNode **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len) { - *pExpr = NULL; - - if (len <= 0 || src == NULL || pSchema == NULL || numOfCols <= 0) { - return; - } - - int32_t pos = 0; - - *pExpr = createSyntaxTree(pSchema, numOfCols, src, &pos); - if (*pExpr != NULL) { - assert((*pExpr)->nodeType == TSQL_NODE_EXPR); - } -} - -int32_t tSQLBinaryExprToStringImpl(tExprNode *pNode, char *dst, uint8_t type) { - int32_t len = 0; - if (type == TSQL_NODE_EXPR) { - *dst = '('; - tSQLBinaryExprToString(pNode, dst + 1, &len); - len += 2; - *(dst + len - 1) = ')'; - } else if (type == TSQL_NODE_COL) { - len = sprintf(dst, "%s", pNode->pSchema->name); - } else { - len = tVariantToString(pNode->pVal, dst); - } - return len; -} - -// TODO REFACTOR WITH SQL PARSER -static char *tSQLOptrToString(uint8_t optr, char *dst) { - switch (optr) { - case TSDB_RELATION_LESS: { - *dst = '<'; - dst += 1; - break; - } - case TSDB_RELATION_LESS_EQUAL: { - *dst = '<'; - *(dst + 1) = '='; - dst += 2; - break; - } - case TSDB_RELATION_EQUAL: { - *dst = '='; - dst += 1; - break; - } - case TSDB_RELATION_GREATER: { - *dst = '>'; - dst += 1; - break; - } - case TSDB_RELATION_GREATER_EQUAL: { - *dst = '>'; - *(dst + 1) = '='; - dst += 2; - break; - } - case TSDB_RELATION_NOT_EQUAL: { - *dst = '<'; - *(dst + 1) = '>'; - dst += 2; - break; - } - case TSDB_RELATION_OR: { - memcpy(dst, "or", 2); - dst += 2; - break; - } - case TSDB_RELATION_AND: { - memcpy(dst, "and", 3); - dst += 3; - break; - } - default:; - } - return dst; -} - -void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len) { - if (pExpr == NULL) { - *dst = 0; - *len = 0; - return; - } - - int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->_node.pLeft, dst, pExpr->_node.pLeft->nodeType); - dst += lhs; - *len = lhs; - - char *start = tSQLOptrToString(pExpr->_node.optr, dst); - *len += (start - dst); - - *len += tSQLBinaryExprToStringImpl(pExpr->_node.pRight, start, pExpr->_node.pRight->nodeType); -} - static void UNUSED_FUNC destroySyntaxTree(tExprNode *pNode) { tExprNodeDestroy(pNode, NULL); } void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) { @@ -773,8 +676,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SSkipListNode *pNode = tSkipListIterGet(iter); char * pData = SL_GET_NODE_DATA(pNode); - // todo refactor: - tstr *name = (*(STable **)pData)->name; + tstr *name = (tstr*) tsdbGetTableName(*(void**) pData); // todo speed up by using hash if (pQueryInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) { if (pQueryInfo->optr == TSDB_RELATION_IN) { @@ -976,27 +878,27 @@ void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, free(pRightOutput); } -void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) { - if (pExprs 
== NULL) { - return; - } - - tExprNode *pLeft = pExprs->_node.pLeft; - tExprNode *pRight = pExprs->_node.pRight; - - // recursive traverse left child branch - if (pLeft->nodeType == TSQL_NODE_EXPR) { - tSQLBinaryExprTrv(pLeft, res); - } else if (pLeft->nodeType == TSQL_NODE_COL) { - taosArrayPush(res, &pLeft->pSchema->colId); - } - - if (pRight->nodeType == TSQL_NODE_EXPR) { - tSQLBinaryExprTrv(pRight, res); - } else if (pRight->nodeType == TSQL_NODE_COL) { - taosArrayPush(res, &pRight->pSchema->colId); - } -} +//void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) { +// if (pExprs == NULL) { +// return; +// } +// +// tExprNode *pLeft = pExprs->_node.pLeft; +// tExprNode *pRight = pExprs->_node.pRight; +// +// // recursive traverse left child branch +// if (pLeft->nodeType == TSQL_NODE_EXPR) { +// tSQLBinaryExprTrv(pLeft, res); +// } else if (pLeft->nodeType == TSQL_NODE_COL) { +// taosArrayPush(res, &pLeft->pSchema->colId); +// } +// +// if (pRight->nodeType == TSQL_NODE_EXPR) { +// tSQLBinaryExprTrv(pRight, res); +// } else if (pRight->nodeType == TSQL_NODE_COL) { +// taosArrayPush(res, &pRight->pSchema->colId); +// } +//} static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) { tbufWriteUint8(bw, expr->nodeType); diff --git a/src/query/src/qextbuffer.c b/src/query/src/qextbuffer.c index ce3f60c072..afcf902123 100644 --- a/src/query/src/qextbuffer.c +++ b/src/query/src/qextbuffer.c @@ -118,7 +118,7 @@ static bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { * To flush data to disk to accommodate more data */ if (pMemBuffer->numOfInMemPages > 0 && pMemBuffer->numOfInMemPages == pMemBuffer->inMemCapacity) { - if (!tExtMemBufferFlush(pMemBuffer)) { + if (tExtMemBufferFlush(pMemBuffer) != 0) { return false; } } @@ -268,6 +268,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file); if (retVal <= 0) { // failed to write to buffer, may be not enough space ret = TAOS_SYSTEM_ERROR(errno); + return ret; } pMemBuffer->fileMeta.numOfElemsInFile += first->item.num; diff --git a/src/query/src/qfill.c b/src/query/src/qfill.c index 4cb3779166..65951a5b9e 100644 --- a/src/query/src/qfill.c +++ b/src/query/src/qfill.c @@ -22,41 +22,6 @@ #define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC) -int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision) { - if (slidingTime == 0) { - return startTime; - } - - if (timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h') { - return (startTime / slidingTime) * slidingTime; - } else { - /* - * here we revised the start time of day according to the local time zone, - * but in case of DST, the start time of one day need to be dynamically decided. - * - * TODO dynamically decide the start time of a day, move to common module - */ - - // todo refactor to extract function that is available for Linux/Windows/Mac platform -#if defined(WINDOWS) && _MSC_VER >= 1900 - // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019 - int64_t timezone = _timezone; - int32_t daylight = _daylight; - char** tzname = _tzname; -#endif - - int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? 
MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L; - - int64_t revStartime = (startTime / slidingTime) * slidingTime + timezone * t; - int64_t revEndtime = revStartime + slidingTime - 1; - if (revEndtime < startTime) { - revStartime += slidingTime; - } - - return revStartime; - } -} - SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) { if (fillType == TSDB_FILL_NONE) { @@ -128,7 +93,7 @@ static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterva if (order == TSDB_ORDER_ASC) { return ekey; } else { - return taosGetIntervalStartTimestamp(ekey, timeInterval, slidingTimeUnit, precision); + return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision); } } @@ -209,7 +174,7 @@ int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { return 0; } - return FILL_IS_ASC_FILL(pFillInfo) ? (pFillInfo->numOfRows - pFillInfo->rowIdx) : pFillInfo->rowIdx + 1; + return pFillInfo->numOfRows - pFillInfo->rowIdx; } // todo: refactor diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp index 15eb780021..df27087216 100644 --- a/src/query/tests/astTest.cpp +++ b/src/query/tests/astTest.cpp @@ -1,11 +1,10 @@ #include -#include #include #include #include -#include "qast.h" #include "taosmsg.h" +#include "qast.h" #include "tsdb.h" #include "tskiplist.h" @@ -24,8 +23,6 @@ static void initSchema_binary(SSchema *schema, int32_t numOfCols); static SSkipList *createSkipList(SSchema *pSchema, int32_t numOfTags); static SSkipList *createSkipList_binary(SSchema *pSchema, int32_t numOfTags); -static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *expectedVal); - static void dropMeter(SSkipList *pSkipList); static void Right2LeftTest(SSchema *schema, int32_t numOfCols, SSkipList *pSkipList); @@ -239,44 +236,45 @@ static void initSchema(SSchema *schema, int32_t numOfCols) { // return pSkipList; //} -static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *pResult) { - tExprNode *pExpr = NULL; - tSQLBinaryExprFromString(&pExpr, schema, numOfCols, sql, strlen(sql)); - - char str[512] = {0}; - int32_t len = 0; - if (pExpr == NULL) { - printf("-----error in parse syntax:%s\n\n", sql); - assert(pResult == NULL); - return; - } - - tSQLBinaryExprToString(pExpr, str, &len); - printf("expr is: %s\n", str); - - SArray *result = NULL; - // tExprTreeTraverse(pExpr, pSkipList, result, SSkipListNodeFilterCallback, &result); - // printf("the result is:%lld\n", result.num); - // - // bool findResult = false; - // for (int32_t i = 0; i < result.num; ++i) { - // STabObj *pm = (STabObj *)result.pRes[i]; - // printf("meterid:%s,\t", pm->meterId); - // - // for (int32_t j = 0; j < pResult->numOfResult; ++j) { - // if (strcmp(pm->meterId, pResult->resultName[j]) == 0) { - // findResult = true; - // break; - // } - // } - // assert(findResult == true); - // findResult = false; - // } - - printf("\n\n"); - tExprTreeDestroy(&pExpr, NULL); -} +//static void testQueryStr(SSchema *schema, int32_t numOfCols, char *sql, SSkipList *pSkipList, ResultObj *pResult) { +// tExprNode *pExpr = NULL; +// tSQLBinaryExprFromString(&pExpr, schema, numOfCols, sql, strlen(sql)); +// +// char str[512] = {0}; +// int32_t len = 0; +// if (pExpr == NULL) { +// printf("-----error in parse syntax:%s\n\n", sql); +// assert(pResult == NULL); +// 
return; +// } +// +// tSQLBinaryExprToString(pExpr, str, &len); +// printf("expr is: %s\n", str); +// +// SArray *result = NULL; +// // tExprTreeTraverse(pExpr, pSkipList, result, SSkipListNodeFilterCallback, &result); +// // printf("the result is:%lld\n", result.num); +// // +// // bool findResult = false; +// // for (int32_t i = 0; i < result.num; ++i) { +// // STabObj *pm = (STabObj *)result.pRes[i]; +// // printf("meterid:%s,\t", pm->meterId); +// // +// // for (int32_t j = 0; j < pResult->numOfResult; ++j) { +// // if (strcmp(pm->meterId, pResult->resultName[j]) == 0) { +// // findResult = true; +// // break; +// // } +// // } +// // assert(findResult == true); +// // findResult = false; +// // } +// +// printf("\n\n"); +// tExprTreeDestroy(&pExpr, NULL); +//} +#if 0 static void Left2RightTest(SSchema *schema, int32_t numOfCols, SSkipList *pSkipList) { char str[256] = {0}; @@ -632,4 +630,5 @@ void exprSerializeTest2() { } // namespace TEST(testCase, astTest) { // exprSerializeTest2(); -} \ No newline at end of file +} +#endif \ No newline at end of file diff --git a/src/rpc/inc/rpcLog.h b/src/rpc/inc/rpcLog.h index 0504ddac43..f0f5c84ff9 100644 --- a/src/rpc/inc/rpcLog.h +++ b/src/rpc/inc/rpcLog.h @@ -31,9 +31,7 @@ extern int32_t tscEmbedded; #define tInfo(...) { if (rpcDebugFlag & DEBUG_INFO) { taosPrintLog("RPC INFO ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }} #define tDebug(...) { if (rpcDebugFlag & DEBUG_DEBUG) { taosPrintLog("RPC DEBUG ", rpcDebugFlag, __VA_ARGS__); }} #define tTrace(...) { if (rpcDebugFlag & DEBUG_TRACE) { taosPrintLog("RPC TRACE ", rpcDebugFlag, __VA_ARGS__); }} - -#define tDebugDump(x, y) { if (rpcDebugFlag & DEBUG_DEBUG) { taosDumpData((unsigned char *)x, y); }} -#define tTraceDump(x, y) { if (rpcDebugFlag & DEBUG_TRACE) { taosDumpData((unsigned char *)x, y); }} +#define tDump(x, y) { if (rpcDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)x, y); }} #ifdef __cplusplus } diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index c05c8c76e1..5d67d5e615 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -538,6 +538,7 @@ void rpcCancelRequest(void *handle) { if (pContext->pConn) { tDebug("%s, app trys to cancel request", pContext->pConn->info); + pContext->pConn->pReqMsg = NULL; rpcCloseConn(pContext->pConn); pContext->pConn = NULL; rpcFreeCont(pContext->pCont); @@ -602,8 +603,13 @@ static void rpcReleaseConn(SRpcConn *pConn) { rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg pConn->pRspMsg = NULL; - if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg); - } + // if server has ever reported progress, free content + if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg); // do not use rpcFreeMsg + } else { + // if there is an outgoing message, free it + if (pConn->outType && pConn->pReqMsg) + rpcFreeMsg(pConn->pReqMsg); + } // memset could not be used, since lockeBy can not be reset pConn->inType = 0; @@ -959,6 +965,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) { if (pConn->outType) { SRpcReqContext *pContext = pConn->pContext; pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + pConn->pReqMsg = NULL; taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl); } @@ -973,7 +980,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { SRpcInfo *pRpc = (SRpcInfo *)pRecv->shandle; SRpcConn *pConn = (SRpcConn *)pRecv->thandle; - tTraceDump(pRecv->msg, pRecv->msgLen); + tDump(pRecv->msg, pRecv->msgLen); // underlying UDP layer does not know it is server or client pRecv->connType = pRecv->connType | 
pRpc->connType; @@ -1061,6 +1068,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) { SRpcReqContext *pContext = pConn->pContext; rpcMsg.handle = pContext; pConn->pContext = NULL; + pConn->pReqMsg = NULL; // for UDP, port may be changed by server, the port in ipSet shall be used for cache if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) { @@ -1247,7 +1255,7 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) { tError("%s, failed to send, msgLen:%d written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno)); } - tTraceDump(msg, msgLen); + tDump(msg, msgLen); } static void rpcProcessConnError(void *param, void *id) { @@ -1297,6 +1305,7 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) { tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); if (pConn->pContext) { pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + pConn->pReqMsg = NULL; taosTmrStart(rpcProcessConnError, 0, pConn->pContext, pRpc->tmrCtrl); rpcReleaseConn(pConn); } @@ -1357,6 +1366,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) { } int32_t compLen = LZ4_compress_default(pCont, buf, contLen, contLen + overhead); + tDebug("compress rpc msg, before:%d, after:%d, overhead:%d", contLen, compLen, overhead); /* * only the compressed size is less than the value of contLen - overhead, the compression is applied @@ -1369,7 +1379,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) { memcpy(pCont + overhead, buf, compLen); pHead->comp = 1; - //tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen); + tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen); finalLen = compLen + overhead; } else { finalLen = contLen; diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index e51b54e299..f8dbbedb11 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -156,6 +156,7 @@ int main(int argc, char *argv[]) { } tInfo("client is initialized"); + tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs); gettimeofday(&systemTime, NULL); startTime = systemTime.tv_sec*1000000 + systemTime.tv_usec; diff --git a/src/rpc/test/rserver.c b/src/rpc/test/rserver.c index 1ac9409a57..d06e9df64b 100644 --- a/src/rpc/test/rserver.c +++ b/src/rpc/test/rserver.c @@ -24,23 +24,21 @@ int msgSize = 128; int commit = 0; int dataFd = -1; void *qhandle = NULL; +void *qset = NULL; void processShellMsg() { static int num = 0; taos_qall qall; SRpcMsg *pRpcMsg, rpcMsg; int type; + void *pvnode; qall = taosAllocateQall(); while (1) { - int numOfMsgs = taosReadAllQitems(qhandle, qall); - if (numOfMsgs <= 0) { - usleep(100); - continue; - } - + int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode); tDebug("%d shell msgs are received", numOfMsgs); + if (numOfMsgs <= 0) break; for (int i=0; i schemaVersion(*(STSchema **)key2)) { + return 1; + } else { + return 0; + } +} + +static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) { + STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? 
pTable->pSuper : pTable; + STSchema* pSchema = NULL; + STSchema* pTSchema = NULL; + + if (lock) taosRLockLatch(&(pDTable->latch)); + if (version < 0) { // get the latest version of schema + pTSchema = pDTable->schema[pDTable->numOfSchemas - 1]; + } else { // get the schema with version + void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*), + tsdbCompareSchemaVersion, TD_EQ); + if (ptr == NULL) { + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + goto _exit; + } + pTSchema = *(STSchema**)ptr; + } + + ASSERT(pTSchema != NULL); + + if (copy) { + if ((pSchema = tdDupSchema(pTSchema)) == NULL) terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + } else { + pSchema = pTSchema; + } + +_exit: + if (lock) taosRUnLockLatch(&(pDTable->latch)); + return pSchema; +} + +static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) { + return tsdbGetTableSchemaImpl(pTable, false, false, -1); +} + +static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { + if (pTable->type == TSDB_CHILD_TABLE) { // check child table first + STable *pSuper = pTable->pSuper; + if (pSuper == NULL) return NULL; + return pSuper->tagSchema; + } else if (pTable->type == TSDB_SUPER_TABLE) { + return pTable->tagSchema; + } else { + return NULL; + } +} // ------------------ tsdbBuffer.c STsdbBufPool* tsdbNewBufPool(); @@ -383,8 +437,9 @@ int tsdbLoadCompIdx(SRWHelper* pHelper, void* target); int tsdbLoadCompInfo(SRWHelper* pHelper, void* target); int tsdbLoadCompData(SRWHelper* phelper, SCompBlock* pcompblock, void* target); void tsdbGetDataStatis(SRWHelper* pHelper, SDataStatis* pStatis, int numOfCols); -int tsdbLoadBlockDataCols(SRWHelper* pHelper, SCompBlock* pCompBlock, int16_t* colIds, int numOfColIds); -int tsdbLoadBlockData(SRWHelper* pHelper, SCompBlock* pCompBlock); +int tsdbLoadBlockDataCols(SRWHelper* pHelper, SCompBlock* pCompBlock, SCompInfo* pCompInfo, int16_t* colIds, + int numOfColIds); +int tsdbLoadBlockData(SRWHelper* pHelper, SCompBlock* pCompBlock, SCompInfo* pCompInfo); // ------------------ tsdbMain.c #define REPO_ID(r) (r)->config.tsdbId diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 4b9e977a1b..6b31600705 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -41,9 +41,9 @@ typedef struct { } SSubmitBlkIter; typedef struct { - int32_t totalLen; - int32_t len; - SSubmitBlk *pBlock; + int32_t totalLen; + int32_t len; + void * pMsg; } SSubmitMsgIter; static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg); @@ -56,7 +56,7 @@ static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg); static void tsdbFreeRepo(STsdbRepo *pRepo); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows); -static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter); +static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int tsdbRestoreInfo(STsdbRepo *pRepo); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); @@ -68,6 +68,7 @@ static int keyFGroupCompFunc(const void *key, const void *fgroup); static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg); static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); +static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); // Function declaration int32_t 
tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) { @@ -164,6 +165,13 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg * STsdbRepo * pRepo = (STsdbRepo *)repo; SSubmitMsgIter msgIter = {0}; + if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) { + if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { + tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); + } + return -1; + } + if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) { tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; @@ -173,12 +181,14 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg * int32_t affectedrows = 0; TSKEY now = taosGetTimestamp(pRepo->config.precision); - - while ((pBlock = tsdbGetSubmitMsgNext(&msgIter)) != NULL) { + while (true) { + tsdbGetSubmitMsgNext(&msgIter, &pBlock); + if (pBlock == NULL) break; if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) { return -1; } } + if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); return 0; } @@ -263,7 +273,7 @@ void tsdbStartStream(TSDB_REPO_T *repo) { STable *pTable = pMeta->tables[i]; if (pTable && pTable->type == TSDB_STREAM_TABLE) { pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, - tsdbGetTableSchema(pTable)); + tsdbGetTableSchemaImpl(pTable, false, false, -1)); } } } @@ -694,17 +704,12 @@ static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { return -1; } - pMsg->length = htonl(pMsg->length); - pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); - pMsg->compressed = htonl(pMsg->compressed); - pIter->totalLen = pMsg->length; - pIter->len = TSDB_SUBMIT_MSG_HEAD_SIZE; + pIter->len = 0; + pIter->pMsg = pMsg; if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) { terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; return -1; - } else { - pIter->pBlock = pMsg->blocks; } return 0; @@ -714,26 +719,8 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY STsdbMeta *pMeta = pRepo->tsdbMeta; int64_t points = 0; - STable *pTable = tsdbGetTableByUid(pMeta, pBlock->uid); - if (pTable == NULL || TABLE_TID(pTable) != pBlock->tid) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, - pBlock->tid); - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; - return -1; - } - - if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - return -1; - } - - // Check schema version and update schema if needed - if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) { - tsdbError("vgId:%d failed to insert data to table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - return -1; - } + STable *pTable = pMeta->tables[pBlock->tid]; + ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); SSubmitBlkIter blkIter = {0}; SDataRow row = NULL; @@ -764,27 +751,23 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY return 0; } -static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter) { - SSubmitBlk *pBlock = pIter->pBlock; - if (pBlock == NULL) return NULL; - - pBlock->dataLen = htonl(pBlock->dataLen); - pBlock->schemaLen = htonl(pBlock->schemaLen); - pBlock->numOfRows = htons(pBlock->numOfRows); - pBlock->uid = htobe64(pBlock->uid); - pBlock->tid = htonl(pBlock->tid); - - pBlock->sversion = 
htonl(pBlock->sversion); - pBlock->padding = htonl(pBlock->padding); - - pIter->len = pIter->len + sizeof(SSubmitBlk) + pBlock->dataLen; - if (pIter->len >= pIter->totalLen) { - pIter->pBlock = NULL; +static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { + if (pIter->len == 0) { + pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE; } else { - pIter->pBlock = (SSubmitBlk *)((char *)pBlock + pBlock->dataLen + sizeof(SSubmitBlk)); + SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); + pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen); } - return pBlock; + if (pIter->len > pIter->totalLen) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + *pPBlock = NULL; + return -1; + } + + *pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); + + return 0; } static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { @@ -969,42 +952,64 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) { static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { ASSERT(pTable != NULL); - STSchema *pSchema = tsdbGetTableSchema(pTable); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); int sversion = schemaVersion(pSchema); - if (pBlock->sversion == sversion) return 0; - if (pBlock->sversion > sversion) { // need to config - tsdbDebug("vgId:%d table %s tid %d has version %d smaller than client version %d, try to config", REPO_ID(pRepo), - TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), sversion, pBlock->sversion); - if (pRepo->appH.configFunc) { - void *msg = (*pRepo->appH.configFunc)(REPO_ID(pRepo), TABLE_TID(pTable)); - if (msg == NULL) { - tsdbError("vgId:%d failed to config table %s tid %d since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - TABLE_TID(pTable), tstrerror(terrno)); - return -1; - } - - STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg); - if (pTableCfg == NULL) { - rpcFreeCont(msg); - return -1; - } - - if (tsdbUpdateTable(pRepo, (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? 
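The reworked tsdbGetSubmitMsgNext() above walks the submit message as a flat buffer: it keeps a running byte offset, skips the message head on the first call, then advances by the block header size plus the block's payload length, and reports a truncated message instead of reading past the end. The sketch below demonstrates that offset-driven iteration with deliberately simplified header structs; it is an illustration of the pattern, not the actual TDengine message layout.

```c
/*
 * Illustration only: iterate back-to-back, variable-length blocks in a flat
 * buffer by advancing a byte offset, with a bounds check for truncation.
 * MsgHead, BlkHead and nextBlock() are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t totalLen; } MsgHead;
typedef struct { int32_t dataLen; } BlkHead;

/* Returns 0 and sets *pBlk (NULL at the end); returns -1 on a malformed message. */
static int nextBlock(const char *msg, int32_t totalLen, int32_t *offset, const BlkHead **pBlk) {
  if (*offset == 0) {
    *offset = (int32_t)sizeof(MsgHead);                   /* skip the message head   */
  } else {
    const BlkHead *prev = (const BlkHead *)(msg + *offset);
    *offset += (int32_t)sizeof(BlkHead) + prev->dataLen;  /* skip the previous block */
  }
  if (*offset > totalLen) { *pBlk = NULL; return -1; }    /* message is truncated    */
  *pBlk = (*offset == totalLen) ? NULL : (const BlkHead *)(msg + *offset);
  return 0;
}

int main(void) {
  int32_t storage[16] = {0};                 /* int32_t storage keeps the casts aligned */
  char *buf = (char *)storage;

  BlkHead *b1 = (BlkHead *)(buf + sizeof(MsgHead));
  b1->dataLen = 8;
  BlkHead *b2 = (BlkHead *)((char *)b1 + sizeof(BlkHead) + b1->dataLen);
  b2->dataLen = 4;
  ((MsgHead *)buf)->totalLen = (int32_t)(((char *)b2 + sizeof(BlkHead) + b2->dataLen) - buf);

  int32_t offset = 0;
  const BlkHead *blk = NULL;
  while (nextBlock(buf, ((MsgHead *)buf)->totalLen, &offset, &blk) == 0 && blk != NULL) {
    printf("block at offset %d, dataLen %d\n", (int)offset, (int)blk->dataLen);
  }
  return 0;
}
```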
pTable->pSuper : pTable, pTableCfg) < 0) { - tsdbError("vgId:%d failed to update table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - tsdbClearTableCfg(pTableCfg); - rpcFreeCont(msg); - return -1; - } - tsdbClearTableCfg(pTableCfg); - rpcFreeCont(msg); - } else { + if (pBlock->sversion == sversion) { + return 0; + } else { + if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; return -1; } + } + + if (pBlock->sversion > sversion) { // may need to update table schema + if (pBlock->schemaLen > 0) { + tsdbDebug( + "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); + ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0); + int numOfCols = pBlock->schemaLen / sizeof(STColumn); + STColumn *pTCol = (STColumn *)pBlock->data; + + STSchemaBuilder schemaBuilder = {0}; + if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + tstrerror(terrno)); + return -1; + } + + for (int i = 0; i < numOfCols; i++) { + if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + tstrerror(terrno)); + tdDestroyTSchemaBuilder(&schemaBuilder); + return -1; + } + } + + STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder); + if (pNSchema == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tdDestroyTSchemaBuilder(&schemaBuilder); + return -1; + } + + tdDestroyTSchemaBuilder(&schemaBuilder); + tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true); + } else { + tsdbDebug( + "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); + terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE; + return -1; + } } else { - if (tsdbGetTableSchemaByVersion(pTable, pBlock->sversion) == NULL) { + ASSERT(pBlock->sversion >= 0); + if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); } @@ -1013,7 +1018,64 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT } return 0; - } +} + +static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { + ASSERT(pMsg != NULL); + STsdbMeta * pMeta = pRepo->tsdbMeta; + SSubmitMsgIter msgIter = {0}; + SSubmitBlk * pBlock = NULL; + + terrno = TSDB_CODE_SUCCESS; + pMsg->length = htonl(pMsg->length); + pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); + + if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; + while (true) { + if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; + if (pBlock == NULL) break; + + pBlock->uid = htobe64(pBlock->uid); + pBlock->tid = htonl(pBlock->tid); + pBlock->sversion = htonl(pBlock->sversion); + pBlock->dataLen = htonl(pBlock->dataLen); + pBlock->schemaLen = htonl(pBlock->schemaLen); + pBlock->numOfRows = htons(pBlock->numOfRows); + + if (pBlock->tid <= 0 || 
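When a submit block carries a newer schema (schemaLen > 0), tsdbCheckTableSchema() above rebuilds the table schema through a builder: initialize it with the client's schema version, add one column per STColumn entry taken from the block, materialize the schema, and destroy the builder. The sketch below mirrors that flow with made-up MiniBuilder/MiniSchema types rather than the real td*SchemaBuilder API.

```c
/*
 * Illustration only: builder-style schema rebuild. MiniBuilder, MiniSchema and
 * the builder* functions are stand-ins for tdInitTSchemaBuilder,
 * tdAddColToSchema and tdGetSchemaFromBuilder.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { short colId; short type; int bytes; } MiniCol;
typedef struct { int version; int nCols; MiniCol cols[64]; } MiniSchema;
typedef struct { MiniSchema tmp; } MiniBuilder;

static int builderInit(MiniBuilder *b, int version) {
  b->tmp.version = version;
  b->tmp.nCols = 0;
  return 0;
}

static int builderAddCol(MiniBuilder *b, short colId, short type, int bytes) {
  if (b->tmp.nCols >= 64) return -1;            /* builder is full                */
  MiniCol c = {colId, type, bytes};
  b->tmp.cols[b->tmp.nCols++] = c;
  return 0;
}

static MiniSchema *builderGetSchema(const MiniBuilder *b) {
  MiniSchema *s = malloc(sizeof(*s));           /* materialize an owned copy      */
  if (s != NULL) *s = b->tmp;
  return s;
}

int main(void) {
  MiniCol wire[] = {{1, 9, 8}, {2, 4, 4}, {3, 8, 32}};  /* columns from the block */
  MiniBuilder b;
  builderInit(&b, 3);                                    /* client schema version */
  for (int i = 0; i < 3; i++) {
    if (builderAddCol(&b, wire[i].colId, wire[i].type, wire[i].bytes) < 0) return 1;
  }
  MiniSchema *pNew = builderGetSchema(&b);
  if (pNew == NULL) return 1;
  printf("built schema version %d with %d columns\n", pNew->version, pNew->nCols);
  free(pNew);
  return 0;
}
```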
pBlock->tid >= pRepo->config.maxTables) { + tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, + pBlock->tid); + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + return -1; + } + + STable *pTable = pMeta->tables[pBlock->tid]; + if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) { + tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, + pBlock->tid); + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + return -1; + } + + if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { + tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); + terrno = TSDB_CODE_TDB_INVALID_ACTION; + return -1; + } + + // Check schema version and update schema if needed + if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) { + if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) { + continue; + } else { + return -1; + } + } + } + + if (terrno != TSDB_CODE_SUCCESS) return -1; + return 0; +} static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { // TODO diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index d3b9081a36..675e44f458 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -119,7 +119,8 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) { int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { if (pMemTable == NULL) return 0; - T_REF_INC(pMemTable); + int ref = T_REF_INC(pMemTable); + tsdbDebug("vgId:%d ref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref); return 0; } @@ -127,7 +128,9 @@ int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { if (pMemTable == NULL) return 0; - if (T_REF_DEC(pMemTable) == 0) { + int ref = T_REF_DEC(pMemTable); + tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref); + if (ref == 0) { STsdbCfg * pCfg = &pRepo->config; STsdbBufPool *pBufPool = pRepo->pPool; @@ -167,6 +170,7 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) { tsdbRefMemTable(pRepo, *pIMem); if (tsdbUnlockRepo(pRepo) < 0) return -1; + tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem); return 0; } @@ -538,10 +542,12 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe SCommitIter *pIter = iters + tid; if (pIter->pTable == NULL) continue; + taosRLockLatch(&(pIter->pTable->latch)); + tsdbSetHelperTable(pHelper, pIter->pTable, pRepo); if (pIter->pIter != NULL) { - tdInitDataCols(pDataCols, tsdbGetTableSchema(pIter->pTable)); + tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)); int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5; int nLoop = 0; @@ -557,6 +563,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe int rowsWritten = tsdbWriteDataBlock(pHelper, pDataCols); ASSERT(rowsWritten != 0); if (rowsWritten < 0) { + taosRUnLockLatch(&(pIter->pTable->latch)); tsdbError("vgId:%d failed to write data block to table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), tstrerror(terrno)); @@ -571,6 +578,8 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe ASSERT(pDataCols->numOfRows == 0); } + taosRUnLockLatch(&(pIter->pTable->latch)); + // Move the last block to the new .l file if neccessary if 
(tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); @@ -680,10 +689,10 @@ static int tsdbReadRowsFromCache(STsdbMeta *pMeta, STable *pTable, SSkipListIter if (dataRowKey(row) > maxKey) break; if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + pSchema = tsdbGetTableSchemaImpl(pTable, true, false, dataRowVersion(row)); if (pSchema == NULL) { // TODO: deal with the error here - ASSERT(false); + ASSERT(0); } } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index cbbf51d862..84c1c8e7d1 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -29,10 +29,9 @@ static void tsdbOrgMeta(void *pHandle); static char * getTagIndexKey(const void *pData); static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper); static void tsdbFreeTable(STable *pTable); -static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema); -static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx); +static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock); static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFromIdx, bool lock); -static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable); +static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper); static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable); static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid); static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup); @@ -76,7 +75,7 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) { // TODO if (super->type != TSDB_SUPER_TABLE) return -1; if (super->tableId.uid != pCfg->superUid) return -1; - tsdbUpdateTable(pRepo, super, pCfg); + // tsdbUpdateTable(pRepo, super, pCfg); } } @@ -84,10 +83,18 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) { if (table == NULL) goto _err; // Register to meta + tsdbWLockRepoMeta(pRepo); if (newSuper) { - if (tsdbAddTableToMeta(pRepo, super, true) < 0) goto _err; + if (tsdbAddTableToMeta(pRepo, super, true, false) < 0) { + tsdbUnlockRepoMeta(pRepo); + goto _err; + } } - if (tsdbAddTableToMeta(pRepo, table, true) < 0) goto _err; + if (tsdbAddTableToMeta(pRepo, table, true, false) < 0) { + tsdbUnlockRepoMeta(pRepo); + goto _err; + } + tsdbUnlockRepoMeta(pRepo); // Write to memtable action int tlen1 = (newSuper) ? tsdbGetTableEncodeSize(TSDB_UPDATE_META, super) : 0; @@ -185,11 +192,6 @@ char *tsdbGetTableName(void* pTable) { } } -STableId tsdbGetTableId(void *pTable) { - assert(pTable); - return ((STable*)pTable)->tableId; -} - STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) { if (pMsg == NULL) return NULL; @@ -255,7 +257,7 @@ _err: return NULL; } -static int32_t colIdCompar(const void* left, const void* right) { +static UNUSED_FUNC int32_t colIdCompar(const void* left, const void* right) { int16_t colId = *(int16_t*) left; STColumn* p2 = (STColumn*) right; @@ -266,89 +268,118 @@ static int32_t colIdCompar(const void* left, const void* right) { return (colId < p2->colId)? 
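The commit path above now takes a per-table read latch before snapshotting the table's schema and releases it on every exit, including the error path. The sketch below shows that discipline with a plain pthread read-write lock; treating taosRLockLatch/taosRUnLockLatch as a reader lock is an assumption of this illustration, and MiniTable is not a TDengine type.

```c
/*
 * Illustration only: take a reader lock before reading shared table state and
 * release it on every exit path, including errors.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct { pthread_rwlock_t latch; int schemaVersion; } MiniTable;

static int commitTable(MiniTable *t, int failEarly) {
  pthread_rwlock_rdlock(&t->latch);        /* readers may proceed concurrently   */
  int version = t->schemaVersion;          /* read a stable view of the schema   */
  if (failEarly) {
    pthread_rwlock_unlock(&t->latch);      /* unlock on the error path as well   */
    return -1;
  }
  printf("committing rows against schema version %d\n", version);
  pthread_rwlock_unlock(&t->latch);
  return 0;
}

int main(void) {
  MiniTable t;
  pthread_rwlock_init(&t.latch, NULL);
  t.schemaVersion = 2;
  commitTable(&t, 0);                      /* normal path                        */
  commitTable(&t, 1);                      /* error path still releases the lock */
  pthread_rwlock_destroy(&t.latch);
  return 0;
}
```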
-1:1; } -int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) { +int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) { STsdbRepo *pRepo = (STsdbRepo *)repo; STsdbMeta *pMeta = pRepo->tsdbMeta; + STSchema * pNewSchema = NULL; pMsg->uid = htobe64(pMsg->uid); pMsg->tid = htonl(pMsg->tid); pMsg->tversion = htons(pMsg->tversion); pMsg->colId = htons(pMsg->colId); + pMsg->bytes = htons(pMsg->bytes); pMsg->tagValLen = htonl(pMsg->tagValLen); pMsg->numOfTags = htons(pMsg->numOfTags); pMsg->schemaLen = htonl(pMsg->schemaLen); - assert(pMsg->schemaLen == sizeof(STColumn) * pMsg->numOfTags); - - char* d = pMsg->data; - for(int32_t i = 0; i < pMsg->numOfTags; ++i) { - STColumn* pCol = (STColumn*) d; - pCol->colId = htons(pCol->colId); - pCol->bytes = htons(pCol->bytes); - pCol->offset = 0; - - d += sizeof(STColumn); + for (int i = 0; i < pMsg->numOfTags; i++) { + STColumn *pTCol = (STColumn *)pMsg->data + i; + pTCol->bytes = htons(pTCol->bytes); + pTCol->colId = htons(pTCol->colId); } STable *pTable = tsdbGetTableByUid(pMeta, pMsg->uid); - if (pTable == NULL) { - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; - return -1; - } - if (TABLE_TID(pTable) != pMsg->tid) { + if (pTable == NULL || TABLE_TID(pTable) != pMsg->tid) { + tsdbError("vgId:%d failed to update table tag value since invalid table id %d uid %" PRIu64, REPO_ID(pRepo), + pMsg->tid, pMsg->uid); terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; return -1; } if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { - tsdbError("vgId:%d failed to update tag value of table %s since its type is %d", REPO_ID(pRepo), - TABLE_CHAR_NAME(pTable), TABLE_TYPE(pTable)); + tsdbError("vgId:%d try to update tag value of a non-child table, invalid action", REPO_ID(pRepo)); terrno = TSDB_CODE_TDB_INVALID_ACTION; return -1; } - if (schemaVersion(tsdbGetTableTagSchema(pTable)) < pMsg->tversion) { - tsdbDebug("vgId:%d server tag version %d is older than client tag version %d, try to config", REPO_ID(pRepo), - schemaVersion(tsdbGetTableTagSchema(pTable)), pMsg->tversion); - void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pMsg->tid); - if (msg == NULL) return -1; - - // Deal with error her - STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg); - STable * super = tsdbGetTableByUid(pMeta, pTableCfg->superUid); - ASSERT(super != NULL); - - int32_t code = tsdbUpdateTable(pRepo, super, pTableCfg); - if (code != TSDB_CODE_SUCCESS) { - tsdbClearTableCfg(pTableCfg); - return code; - } - tsdbClearTableCfg(pTableCfg); - rpcFreeCont(msg); - } - - STSchema *pTagSchema = tsdbGetTableTagSchema(pTable); - - if (schemaVersion(pTagSchema) > pMsg->tversion) { + if (schemaVersion(pTable->pSuper->tagSchema) > pMsg->tversion) { tsdbError( "vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag " "version %d", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema)); - return TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE; + terrno = TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE; + return -1; } - if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) { + + if (schemaVersion(pTable->pSuper->tagSchema) < pMsg->tversion) { // tag schema out of data, + tsdbDebug("vgId:%d need to update tag schema of table %s tid %d uid %" PRIu64 + " since out of date, current version %d new version %d", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), + schemaVersion(pTable->pSuper->tagSchema), pMsg->tversion); + + STSchemaBuilder schemaBuilder = {0}; + + STColumn 
*pTCol = (STColumn *)pMsg->data; + ASSERT(pMsg->schemaLen % sizeof(STColumn) == 0 && pTCol[0].colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0))); + if (tdInitTSchemaBuilder(&schemaBuilder, pMsg->tversion) < 0) { + tsdbDebug("vgId:%d failed to update tag schema of table %s tid %d uid %" PRIu64 " since out of memory", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable)); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + for (int i = 0; i < (pMsg->schemaLen / sizeof(STColumn)); i++) { + if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, pTCol[i].colId, pTCol[i].bytes) < 0) { + tdDestroyTSchemaBuilder(&schemaBuilder); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + } + pNewSchema = tdGetSchemaFromBuilder(&schemaBuilder); + if (pNewSchema == NULL) { + tdDestroyTSchemaBuilder(&schemaBuilder); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + tdDestroyTSchemaBuilder(&schemaBuilder); + } + + // Chage in memory + if (pNewSchema != NULL) { // change super table tag schema + taosWLockLatch(&(pTable->pSuper->latch)); + STSchema *pOldSchema = pTable->pSuper->tagSchema; + pTable->pSuper->tagSchema = pNewSchema; + tdFreeSchema(pOldSchema); + taosWUnLockLatch(&(pTable->pSuper->latch)); + } + + bool isChangeIndexCol = (pMsg->colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0))); + // STColumn *pCol = bsearch(&(pMsg->colId), pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar); + // ASSERT(pCol != NULL); + + if (isChangeIndexCol) { + tsdbWLockRepoMeta(pRepo); tsdbRemoveTableFromIndex(pMeta, pTable); } - // TODO: remove table from index if it is the first column of tag - - // TODO: convert the tag schema from client, and then extract the type and bytes from schema according to colId - STColumn* res = bsearch(&pMsg->colId, pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar); - assert(res != NULL); - - tdSetKVRowDataOfCol(&pTable->tagVal, pMsg->colId, res->type, pMsg->data + pMsg->schemaLen); - if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) { - tsdbAddTableIntoIndex(pMeta, pTable); + taosWLockLatch(&(pTable->latch)); + tdSetKVRowDataOfCol(&(pTable->tagVal), pMsg->colId, pMsg->type, POINTER_SHIFT(pMsg->data, pMsg->schemaLen)); + taosWUnLockLatch(&(pTable->latch)); + if (isChangeIndexCol) { + tsdbAddTableIntoIndex(pMeta, pTable, false); + tsdbUnlockRepoMeta(pRepo); } - return TSDB_CODE_SUCCESS; + + // Update on file + int tlen1 = (pNewSchema) ? 
tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable->pSuper) : 0; + int tlen2 = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable); + void *buf = tsdbAllocBytes(pRepo, tlen1+tlen2); + ASSERT(buf != NULL); + if (pNewSchema) { + void *pBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable->pSuper); + ASSERT(POINTER_DISTANCE(pBuf, buf) == tlen1); + buf = pBuf; + } + tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable); + + return 0; } // ------------------ INTERNAL FUNCTIONS ------------------ @@ -449,18 +480,6 @@ int tsdbCloseMeta(STsdbRepo *pRepo) { return 0; } -STSchema *tsdbGetTableSchema(STable *pTable) { - if (pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_STREAM_TABLE) { - return pTable->schema[pTable->numOfSchemas - 1]; - } else if (pTable->type == TSDB_CHILD_TABLE) { - STable *pSuper = pTable->pSuper; - if (pSuper == NULL) return NULL; - return pSuper->schema[pSuper->numOfSchemas - 1]; - } else { - return NULL; - } -} - STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) { void *ptr = taosHashGet(pMeta->uidMap, (char *)(&uid), sizeof(uid)); @@ -470,68 +489,7 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) { } STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t version) { - STable *pSearchTable = (pTable->type == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; - if (pSearchTable == NULL) return NULL; - - void *ptr = taosbsearch(&version, pSearchTable->schema, pSearchTable->numOfSchemas, sizeof(STSchema *), - tsdbCompareSchemaVersion, TD_EQ); - if (ptr == NULL) return NULL; - - return *(STSchema **)ptr; -} - -STSchema *tsdbGetTableTagSchema(STable *pTable) { - if (pTable->type == TSDB_SUPER_TABLE) { - return pTable->tagSchema; - } else if (pTable->type == TSDB_CHILD_TABLE) { - STable *pSuper = pTable->pSuper; - if (pSuper == NULL) return NULL; - return pSuper->tagSchema; - } else { - return NULL; - } -} - -int tsdbUpdateTable(STsdbRepo *pRepo, STable *pTable, STableCfg *pCfg) { - // TODO: this function can only be called when there is no query and commit on this table - ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE); - bool changed = false; - STsdbMeta *pMeta = pRepo->tsdbMeta; - - if ((pTable->type == TSDB_SUPER_TABLE) && (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema))) { - if (tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema) < 0) { - tsdbError("vgId:%d failed to update table %s tag schema since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - return -1; - } - changed = true; - } - - STSchema *pTSchema = tsdbGetTableSchema(pTable); - if (schemaVersion(pTSchema) < schemaVersion(pCfg->schema)) { - if (pTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) { - pTable->schema[pTable->numOfSchemas++] = tdDupSchema(pCfg->schema); - } else { - ASSERT(pTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS); - STSchema *tSchema = tdDupSchema(pCfg->schema); - tdFreeSchema(pTable->schema[0]); - memmove(pTable->schema, pTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1)); - pTable->schema[pTable->numOfSchemas - 1] = tSchema; - } - - pMeta->maxRowBytes = MAX(pMeta->maxRowBytes, dataRowMaxBytesFromSchema(pCfg->schema)); - pMeta->maxCols = MAX(pMeta->maxCols, schemaNCols(pCfg->schema)); - - changed = true; - } - - if (changed) { - int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable); - void *buf = tsdbAllocBytes(pRepo, tlen); - tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable); - } - - return 0; + return tsdbGetTableSchemaImpl(pTable, true, false, version); } int 
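At the end of the tag-update path above, the super-table action and the child-table action are encoded back to back into a single allocation, and a pointer-distance assertion checks that the first record consumed exactly tlen1 bytes. The sketch below reproduces that packing pattern; encodeRecord() is a made-up stand-in for tsdbInsertTableAct(), and the payloads are placeholder strings.

```c
/*
 * Illustration only: pack two variable-length records into one allocation and
 * verify the running offset with a pointer-distance assertion.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POINTER_DISTANCE(p, q) ((char *)(p) - (char *)(q))

static void *encodeRecord(void *buf, const char *payload, int len) {
  memcpy(buf, payload, (size_t)len);
  return (char *)buf + len;                 /* position right after the record   */
}

int main(void) {
  const char *rec1 = "super-table-action";
  const char *rec2 = "child-table-action";
  int tlen1 = (int)strlen(rec1) + 1;
  int tlen2 = (int)strlen(rec2) + 1;

  void *buf = malloc((size_t)(tlen1 + tlen2));
  assert(buf != NULL);

  void *p = encodeRecord(buf, rec1, tlen1);
  assert(POINTER_DISTANCE(p, buf) == tlen1);          /* first record used tlen1 */
  p = encodeRecord(p, rec2, tlen2);
  assert(POINTER_DISTANCE(p, buf) == tlen1 + tlen2);

  printf("packed %d + %d bytes into one buffer\n", tlen1, tlen2);
  free(buf);
  return 0;
}
```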
tsdbWLockRepoMeta(STsdbRepo *pRepo) { @@ -575,7 +533,7 @@ void tsdbRefTable(STable *pTable) { void tsdbUnRefTable(STable *pTable) { int32_t ref = T_REF_DEC(pTable); - tsdbTrace("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref); + tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref); if (ref == 0) { // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable)); @@ -587,17 +545,36 @@ void tsdbUnRefTable(STable *pTable) { } } -// ------------------ LOCAL FUNCTIONS ------------------ -static int tsdbCompareSchemaVersion(const void *key1, const void *key2) { - if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) { - return -1; - } else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) { - return 1; +void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, bool insertAct) { + ASSERT(TABLE_TYPE(pTable) != TSDB_STREAM_TABLE && TABLE_TYPE(pTable) != TSDB_SUPER_TABLE); + STsdbMeta *pMeta = pRepo->tsdbMeta; + + STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; + ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1])); + + taosWLockLatch(&(pCTable->latch)); + if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) { + pCTable->schema[pCTable->numOfSchemas++] = pSchema; } else { - return 0; + ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS); + tdFreeSchema(pCTable->schema[0]); + memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1)); + pCTable->schema[pCTable->numOfSchemas - 1] = pSchema; + } + + if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); + if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); + taosWUnLockLatch(&(pCTable->latch)); + + if (insertAct) { + int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable); + void *buf = tsdbAllocBytes(pRepo, tlen); + ASSERT(buf != NULL); + tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable); } } +// ------------------ LOCAL FUNCTIONS ------------------ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) { STsdbRepo *pRepo = (STsdbRepo *)pHandle; STable * pTable = NULL; @@ -609,7 +586,7 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) { tsdbDecodeTable(cont, &pTable); - if (tsdbAddTableToMeta(pRepo, pTable, false) < 0) { + if (tsdbAddTableToMeta(pRepo, pTable, false, false) < 0) { tsdbFreeTable(pTable); return -1; } @@ -627,7 +604,7 @@ static void tsdbOrgMeta(void *pHandle) { for (int i = 1; i < pCfg->maxTables; i++) { STable *pTable = pMeta->tables[i]; if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) { - tsdbAddTableIntoIndex(pMeta, pTable); + tsdbAddTableIntoIndex(pMeta, pTable, true); } } } @@ -737,7 +714,7 @@ _err: static void tsdbFreeTable(STable *pTable) { if (pTable) { - tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable)); + if (pTable->name != NULL) tsdbDebug("table %s is destroyed", TABLE_CHAR_NAME(pTable)); tfree(TABLE_NAME(pTable)); if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { @@ -757,25 +734,10 @@ static void tsdbFreeTable(STable *pTable) { } } -static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema) { - ASSERT(pTable->type == TSDB_SUPER_TABLE); - ASSERT(schemaVersion(pTable->tagSchema) < schemaVersion(newSchema)); - STSchema *pOldSchema = 
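tsdbUpdateTableSchema() above keeps a bounded history of schema versions: new schemas are appended while there is room, and once TSDB_MAX_TABLE_SCHEMAS is reached the oldest entry is freed, the remaining pointers are shifted down with memmove, and the new schema is stored in the last slot. The sketch below shows that eviction policy with a small stand-in array; MAX_SCHEMAS and MiniSchema are illustrative names, not the real limit or type.

```c
/*
 * Illustration only: bounded history of schema versions with oldest-first
 * eviction; the shift mirrors the memmove used above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_SCHEMAS 3

typedef struct { int version; } MiniSchema;

static void pushSchema(MiniSchema *hist[], int *num, MiniSchema *pNew) {
  if (*num < MAX_SCHEMAS) {
    hist[(*num)++] = pNew;                  /* room left: append                   */
  } else {
    free(hist[0]);                          /* full: drop the oldest version       */
    memmove(hist, hist + 1, sizeof(MiniSchema *) * (MAX_SCHEMAS - 1));
    hist[MAX_SCHEMAS - 1] = pNew;           /* newest always sits in the last slot */
  }
}

int main(void) {
  MiniSchema *hist[MAX_SCHEMAS];
  int num = 0;
  for (int v = 0; v < 5; v++) {
    MiniSchema *s = malloc(sizeof(*s));
    if (s == NULL) return 1;
    s->version = v;
    pushSchema(hist, &num, s);
  }
  for (int i = 0; i < num; i++) printf("slot %d -> version %d\n", i, hist[i]->version);
  for (int i = 0; i < num; i++) free(hist[i]);
  return 0;
}
```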
pTable->tagSchema; - STSchema *pNewSchema = tdDupSchema(newSchema); - if (pNewSchema == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - pTable->tagSchema = pNewSchema; - tdFreeSchema(pOldSchema); - - return 0; -} - -static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) { +static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock) { STsdbMeta *pMeta = pRepo->tsdbMeta; - if (addIdx && tsdbWLockRepoMeta(pRepo) < 0) { + if (lock && tsdbWLockRepoMeta(pRepo) < 0) { tsdbError("vgId:%d failed to add table %s to meta since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tstrerror(terrno)); return -1; @@ -790,7 +752,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) { } } else { if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index - if (tsdbAddTableIntoIndex(pMeta, pTable) < 0) { + if (tsdbAddTableIntoIndex(pMeta, pTable, true) < 0) { tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tstrerror(terrno)); goto _err; @@ -809,14 +771,15 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) { } if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { - STSchema *pSchema = tsdbGetTableSchema(pTable); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); } - if (addIdx && tsdbUnlockRepoMeta(pRepo) < 0) return -1; + if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { - pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, tsdbGetTableSchema(pTable)); + pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, + tsdbGetTableSchemaImpl(pTable, false, false, -1)); } tsdbTrace("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), @@ -825,7 +788,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx) { _err: tsdbRemoveTableFromMeta(pRepo, pTable, false, false); - if (addIdx) tsdbUnlockRepoMeta(pRepo); + if (lock) tsdbUnlockRepoMeta(pRepo); return -1; } @@ -836,7 +799,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro STable * tTable = NULL; STsdbCfg * pCfg = &(pRepo->config); - STSchema *pSchema = tsdbGetTableSchema(pTable); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); int maxCols = schemaNCols(pSchema); int maxRowBytes = schemaTLen(pSchema); @@ -870,7 +833,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro for (int i = 0; i < pCfg->maxTables; i++) { STable *pTable = pMeta->tables[i]; if (pTable != NULL) { - pSchema = tsdbGetTableSchema(pTable); + pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); maxCols = MAX(maxCols, schemaNCols(pSchema)); maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema)); } @@ -882,7 +845,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro tsdbUnRefTable(pTable); } -static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) { +static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper) { ASSERT(pTable->type == TSDB_CHILD_TABLE && pTable != NULL); STable *pSTable = 
tsdbGetTableByUid(pMeta, TABLE_SUID(pTable)); ASSERT(pSTable != NULL); @@ -906,7 +869,7 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) { memcpy(SL_GET_NODE_DATA(pNode), &pTable, sizeof(STable *)); tSkipListPut(pSTable->pIndex, pNode); - T_REF_INC(pSTable); + if (refSuper) T_REF_INC(pSTable); return 0; } @@ -1274,4 +1237,4 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) { } return 0; -} +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index eab9a5e056..326da07a36 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -218,7 +218,7 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) { pHelper->tableInfo.tid = pTable->tableId.tid; pHelper->tableInfo.uid = pTable->tableId.uid; - STSchema *pSchema = tsdbGetTableSchema(pTable); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); pHelper->tableInfo.sversion = schemaVersion(pSchema); tdInitDataCols(pHelper->pDataCols[0], pSchema); @@ -318,7 +318,7 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) { ASSERT(pCompBlock->last); if (pCompBlock->numOfSubBlocks > 1) { - if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1)) < 0) return -1; + if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, pIdx->numOfBlocks - 1), NULL) < 0) return -1; ASSERT(pHelper->pDataCols[0]->numOfRows > 0 && pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock); if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.nLastF), pHelper->pDataCols[0], pHelper->pDataCols[0]->numOfRows, &compBlock, true, true) < 0) @@ -577,11 +577,12 @@ void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols) } } -int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, int16_t *colIds, int numOfColIds) { +int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo, int16_t *colIds, int numOfColIds) { ASSERT(pCompBlock->numOfSubBlocks >= 1); // Must be super block int numOfSubBlocks = pCompBlock->numOfSubBlocks; - if (numOfSubBlocks > 1) pCompBlock = (SCompBlock *)POINTER_SHIFT(pHelper->pCompInfo, pCompBlock->offset); + if (numOfSubBlocks > 1) + pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset); tdResetDataCols(pHelper->pDataCols[0]); if (tsdbLoadBlockDataColsImpl(pHelper, pCompBlock, pHelper->pDataCols[0], colIds, numOfColIds) < 0) goto _err; @@ -598,10 +599,10 @@ _err: return -1; } -int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock) { - +int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo) { int numOfSubBlock = pCompBlock->numOfSubBlocks; - if (numOfSubBlock > 1) pCompBlock = (SCompBlock *)POINTER_SHIFT(pHelper->pCompInfo, pCompBlock->offset); + if (numOfSubBlock > 1) + pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? 
pHelper->pCompInfo : pCompInfo, pCompBlock->offset); tdResetDataCols(pHelper->pDataCols[0]); if (tsdbLoadBlockDataImpl(pHelper, pCompBlock, pHelper->pDataCols[0]) < 0) goto _err; @@ -703,6 +704,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa } // Add checksum + ASSERT(pCompCol->len > 0); pCompCol->len += sizeof(TSCKSUM); taosCalcChecksumAppend(0, (uint8_t *)tptr, pCompCol->len); @@ -792,7 +794,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err; } else { // Load - if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx)) < 0) goto _err; + if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err; ASSERT(pHelper->pDataCols[0]->numOfRows <= blockAtIdx(pHelper, blkIdx)->numOfRows); // Merge if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, rowsWritten) < 0) goto _err; @@ -848,7 +850,7 @@ static int tsdbMergeDataWithBlock(SRWHelper *pHelper, int blkIdx, SDataCols *pDa if (tsdbAddSubBlock(pHelper, &compBlock, blkIdx, rowsWritten) < 0) goto _err; } else { // Load-Merge-Write // Load - if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx)) < 0) goto _err; + if (tsdbLoadBlockData(pHelper, blockAtIdx(pHelper, blkIdx), NULL) < 0) goto _err; if (blockAtIdx(pHelper, blkIdx)->last) pHelper->hasOldLastBlock = false; rowsWritten = rows3; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 6a9c8e1ff6..a4e0151f89 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -22,7 +22,7 @@ #include "exception.h" #include "../../../query/inc/qast.h" // todo move to common module -#include "../../../query/inc/tlosertree.h" // todo move to util module +#include "tlosertree.h" #include "tsdb.h" #include "tsdbMain.h" @@ -122,7 +122,7 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle); static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock, SArray* sa); static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey, +static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win, STsdbQueryHandle* pQueryHandle); static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) { @@ -249,8 +249,6 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh pCheckInfo->initBuf = true; int32_t order = pHandle->order; -// tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem); - // no data in buffer, abort if (pHandle->mem == NULL && pHandle->imem == NULL) { return false; @@ -392,8 +390,11 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { STable* pTable = pCheckInfo->pTableObj; assert(pTable != NULL); - - initTableMemIterator(pHandle, pCheckInfo); + + if (!pCheckInfo->initBuf) { + initTableMemIterator(pHandle, pCheckInfo); + } + SDataRow row = getSDataRowInTableMem(pCheckInfo); if (row == NULL) { return false; @@ -411,8 +412,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 
1:-1; STimeWindow* win = &pHandle->cur.win; - pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey, - pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API + pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey, pHandle->outputCapacity, win, pHandle); // update the last key value pCheckInfo->lastKey = win->ekey + step; @@ -576,6 +576,8 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { STsdbRepo *pRepo = pQueryHandle->pTsdb; + + // TODO refactor SCompData* data = calloc(1, sizeof(SCompData) + sizeof(SCompCol) * pBlock->numOfCols); data->numOfCols = pBlock->numOfCols; @@ -592,9 +594,12 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); } - tdInitDataCols(pCheckInfo->pDataCols, tsdbGetTableSchema(pCheckInfo->pTableObj)); + STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); + tdInitDataCols(pCheckInfo->pDataCols, pSchema); + tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema); + tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); - if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock) == 0) { + if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo) == 0) { SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; @@ -605,8 +610,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo } SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; - assert(pCols->numOfRows != 0); + assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); + pBlock->numOfRows = pCols->numOfRows; taosArrayDestroy(sa); tfree(data); @@ -636,7 +642,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 
1 : -1; cur->rows = tsdbReadRowsFromCache(pCheckInfo, binfo.window.skey - step, - pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle); + pQueryHandle->outputCapacity, &cur->win, pQueryHandle); pQueryHandle->realNumOfRows = cur->rows; // update the last key value @@ -1237,7 +1243,6 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void* // assert(pLeftBlockInfoEx->compBlock->offset != pRightBlockInfoEx->compBlock->offset); if (pLeftBlockInfoEx->compBlock->offset == pRightBlockInfoEx->compBlock->offset && pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) { - // todo add more information tsdbError("error in header file, two block with same offset:%" PRId64, (int64_t)pLeftBlockInfoEx->compBlock->offset); } @@ -1477,7 +1482,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); assert(numOfTables > 0); - + + SDataBlockInfo blockInfo = {{0}, 0}; if (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) { pQueryHandle->type = TSDB_QUERY_TYPE_ALL; pQueryHandle->order = TSDB_ORDER_DESC; @@ -1487,7 +1493,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { } SArray* sa = getDefaultLoadColumns(pQueryHandle, true); - /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle); + /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, sa); if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { @@ -1558,7 +1564,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { bool ret = tsdbNextDataBlock((void*) pSecQueryHandle); assert(ret); - /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle); + /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); /*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, sa); for (int32_t i = 0; i < numOfCols; ++i) { @@ -1693,11 +1699,11 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) { pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL}; } -static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey, +static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win, STsdbQueryHandle* pQueryHandle) { int numOfRows = 0; int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns); - *skey = TSKEY_INITIAL_VAL; + win->skey = TSKEY_INITIAL_VAL; int64_t st = taosGetTimestampUs(); STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb); @@ -1717,11 +1723,11 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int break; } - if (*skey == INT64_MIN) { - *skey = key; + if (win->skey == INT64_MIN) { + win->skey = key; } - *ekey = key; + win->ekey = key; copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, pMeta, numOfCols, pTable); if (++numOfRows >= maxRowsToRead) { @@ -1750,7 +1756,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } -SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) { +void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle, SDataBlockInfo* pDataBlockInfo) { STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle; SQueryFilePos* cur = &pHandle->cur; STable* pTable = NULL; @@ -1763,16 +1769,12 @@ SDataBlockInfo tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle) { 
STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex); pTable = pCheckInfo->pTableObj; } - - SDataBlockInfo blockInfo = { - .uid = pTable->tableId.uid, - .tid = pTable->tableId.tid, - .rows = cur->rows, - .window = cur->win, - .numOfCols = QH_GET_NUM_OF_COLS(pHandle), - }; - return blockInfo; + pDataBlockInfo->uid = pTable->tableId.uid; + pDataBlockInfo->tid = pTable->tableId.tid; + pDataBlockInfo->rows = cur->rows; + pDataBlockInfo->window = cur->win; + pDataBlockInfo->numOfCols = QH_GET_NUM_OF_COLS(pHandle); } /* @@ -1974,9 +1976,9 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { int32_t type = 0; int32_t bytes = 0; - if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor extract method , to queryExecutor to generate tags values - f1 = (char*) pTable1->name; - f2 = (char*) pTable2->name; + if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { + f1 = (char*) TABLE_NAME(pTable1); + f2 = (char*) TABLE_NAME(pTable2); type = TSDB_DATA_TYPE_BINARY; bytes = tGetTableNameColumnSchema().bytes; } else { @@ -2085,13 +2087,17 @@ bool indexedNodeFilterFp(const void* pNode, void* param) { char* val = NULL; if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) { - val = (char*) pTable->name; + val = (char*) TABLE_NAME(pTable); } else { val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); } - //todo :the val is possible to be null, so check it out carefully - int32_t ret = pInfo->compare(val, pInfo->q); + int32_t ret = 0; + if (val == NULL) { //the val is possible to be null, so check it out carefully + ret = -1; // val is missing in table tags value pairs + } else { + ret = pInfo->compare(val, pInfo->q); + } switch (pInfo->optr) { case TSDB_RELATION_EQUAL: { diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index b026ad4386..2982b8dc70 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -24,14 +24,13 @@ extern "C" { #include "tref.h" #include "hash.h" -typedef void (*__cache_freeres_fn_t)(void*); +typedef void (*__cache_free_fn_t)(void*); typedef struct SCacheStatis { int64_t missCount; int64_t hitCount; int64_t totalAccess; int64_t refreshCount; - int32_t numOfCollision; } SCacheStatis; typedef struct SCacheDataNode { @@ -70,7 +69,7 @@ typedef struct { // void * pTimer; SCacheStatis statistics; SHashObj * pHashTable; - __cache_freeres_fn_t freeFp; + __cache_free_fn_t freeFp; uint32_t numOfElemsInTrash; // number of element in trash uint8_t deleting; // set the deleting flag to stop refreshing ASAP. 
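tsdbRetrieveDataBlockInfo() above now fills a caller-provided SDataBlockInfo instead of returning the struct by value, so callers such as tsdbNextDataBlock() can declare one local blockInfo and reuse it across calls. The sketch below shows that output-parameter convention with simplified types; MiniBlockInfo and retrieveBlockInfo() are illustrative names only, not the tsdb query API.

```c
/*
 * Illustration only: fill a caller-provided struct instead of returning it by
 * value, so the same local can be reused for every block.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t uid; int32_t tid; int32_t rows; } MiniBlockInfo;

static void retrieveBlockInfo(int blockNo, MiniBlockInfo *pInfo) {
  pInfo->uid  = 1000u + (uint64_t)blockNo;   /* write into the caller's struct    */
  pInfo->tid  = blockNo;
  pInfo->rows = 100 * (blockNo + 1);
}

int main(void) {
  MiniBlockInfo info = {0, 0, 0};            /* one local, reused for every block */
  for (int i = 0; i < 3; i++) {
    retrieveBlockInfo(i, &info);
    printf("uid %llu tid %d rows %d\n", (unsigned long long)info.uid, (int)info.tid, (int)info.rows);
  }
  return 0;
}
```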
pthread_t refreshWorker; @@ -91,15 +90,7 @@ typedef struct { * @param fn free resource callback function * @return */ -SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName); - -/** - * initialize the cache object and set the free object callback function - * @param refreshTimeInSeconds - * @param freeCb - * @return - */ -SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName); +SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char *cacheName); /** * add data into cache @@ -163,9 +154,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove); /** * move all data node into trash, clear node in trash can if it is not referenced by any clients * @param handle - * @param _remove remove the data or not if refcount is greater than 0 */ -void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove); +void taosCacheEmpty(SCacheObj *pCacheObj); /** * release all allocated memory and destroy the cache object. @@ -180,6 +170,14 @@ void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove); */ void taosCacheCleanup(SCacheObj *pCacheObj); +/** + * + * @param pCacheObj + * @param fp + * @return + */ +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp); + #ifdef __cplusplus } #endif diff --git a/src/util/inc/tfile.h b/src/util/inc/tfile.h index 5bddc76266..04e500743c 100644 --- a/src/util/inc/tfile.h +++ b/src/util/inc/tfile.h @@ -18,6 +18,7 @@ #ifdef TAOS_RANDOM_FILE_FAIL +void taosSetRandomFileFailFactor(int factor); ssize_t taos_tread(int fd, void *buf, size_t count); ssize_t taos_twrite(int fd, void *buf, size_t count); off_t taos_lseek(int fd, off_t offset, int whence); diff --git a/src/util/inc/tlog.h b/src/util/inc/tlog.h index af4e6ae1c2..1f6a81d4b4 100644 --- a/src/util/inc/tlog.h +++ b/src/util/inc/tlog.h @@ -26,6 +26,7 @@ extern "C" { #define DEBUG_INFO DEBUG_WARN #define DEBUG_DEBUG 4U #define DEBUG_TRACE 8U +#define DEBUG_DUMP 16U #define DEBUG_SCREEN 64U #define DEBUG_FILE 128U diff --git a/src/query/inc/tlosertree.h b/src/util/inc/tlosertree.h similarity index 100% rename from src/query/inc/tlosertree.h rename to src/util/inc/tlosertree.h diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h index 686e5ab313..4ba620dce0 100644 --- a/src/util/inc/tskiplist.h +++ b/src/util/inc/tskiplist.h @@ -51,6 +51,7 @@ typedef struct SSkipListNode { #define SL_GET_NODE_KEY(s, n) ((s)->keyFn(SL_GET_NODE_DATA(n))) #define SL_GET_SL_MIN_KEY(s) (SL_GET_NODE_KEY((s), SL_GET_FORWARD_POINTER((s)->pHead, 0))) +#define SL_GET_SL_MAX_KEY(s) (SL_GET_NODE_KEY((s), SL_GET_BACKWARD_POINTER((s)->pTail, 0))) #define SL_GET_NODE_LEVEL(n) *(uint8_t *)((n)) @@ -119,7 +120,6 @@ typedef struct SSkipList { pthread_rwlock_t *lock; SSkipListNode * pHead; // point to the first element SSkipListNode * pTail; // point to the last element - void * lastKey; // last key in the skiplist #if SKIP_LIST_RECORD_PERFORMANCE tSkipListState state; // skiplist state #endif diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index d546970868..688e49a40b 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -223,9 +223,9 @@ static void doCleanupDataCache(SCacheObj *pCacheObj); * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime * @param handle Cache object handle */ -static void* 
taosCacheRefresh(void *handle); +static void* taosCacheTimedRefresh(void *pCacheObj); -SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) { +SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char* cacheName) { if (refreshTimeInSeconds <= 0) { return NULL; } @@ -261,7 +261,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheRefresh, pCacheObj); + pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheTimedRefresh, pCacheObj); pthread_attr_destroy(&thattr); return pCacheObj; @@ -450,7 +450,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } } -void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) { +void taosCacheEmpty(SCacheObj *pCacheObj) { SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); __cache_wr_lock(pCacheObj); @@ -459,8 +459,8 @@ void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) { break; } - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); - if (T_REF_VAL_GET(pNode) == 0 || _remove) { + SCacheDataNode *pNode = *(SCacheDataNode **) taosHashIterGet(pIter); + if (T_REF_VAL_GET(pNode) == 0) { taosCacheReleaseNode(pCacheObj, pNode); } else { taosCacheMoveToTrash(pCacheObj, pNode); @@ -469,7 +469,7 @@ void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) { __cache_unlock(pCacheObj); taosHashDestroyIter(pIter); - taosTrashCanEmpty(pCacheObj, _remove); + taosTrashCanEmpty(pCacheObj, false); } void taosCacheCleanup(SCacheObj *pCacheObj) { @@ -623,8 +623,29 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { free(pCacheObj); } -void* taosCacheRefresh(void *handle) { - SCacheObj *pCacheObj = (SCacheObj *)handle; +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { + SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); + + __cache_wr_lock(pCacheObj); + while (taosHashIterNext(pIter)) { + SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); + if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= time && T_REF_VAL_GET(pNode) <= 0) { + taosCacheReleaseNode(pCacheObj, pNode); + continue; + } + + if (fp) { + fp(pNode->data); + } + } + + __cache_unlock(pCacheObj); + + taosHashDestroyIter(pIter); +} + +void* taosCacheTimedRefresh(void *handle) { + SCacheObj* pCacheObj = handle; if (pCacheObj == NULL) { uDebug("object is destroyed. 
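The refresh logic above is factored into doCacheRefresh(): walk every hash entry, release nodes whose lifespan has elapsed and whose reference count is zero, and optionally invoke a caller-supplied callback on the entries that survive, which is what the new public taosCacheRefresh() exposes. The sketch below models that pass over a plain array; MiniNode, refreshPass() and the visit() callback signature are simplified stand-ins for SCacheDataNode and __cache_free_fn_t.

```c
/*
 * Illustration only: one refresh pass that evicts expired, unreferenced entries
 * and invokes an optional visitor on the rest.
 */
#include <stdio.h>

typedef void (*visit_fn_t)(const char *);

typedef struct { const char *key; long expireAt; int refCount; int live; } MiniNode;

static void refreshPass(MiniNode *nodes, int n, long now, visit_fn_t fp) {
  for (int i = 0; i < n; i++) {
    if (!nodes[i].live) continue;
    if (nodes[i].expireAt <= now && nodes[i].refCount <= 0) {
      nodes[i].live = 0;                     /* expired and unreferenced: evict   */
      continue;
    }
    if (fp != NULL) fp(nodes[i].key);        /* optional per-entry callback       */
  }
}

static void visit(const char *data) { printf("still cached: %s\n", data); }

int main(void) {
  MiniNode nodes[] = {
    {"meta:tb1", 100, 0, 1},  /* expired, unreferenced: evicted                  */
    {"meta:tb2", 500, 0, 1},  /* not expired yet: visited                        */
    {"meta:tb3", 100, 1, 1},  /* expired but still referenced: kept and visited  */
  };
  refreshPass(nodes, 3, 200L, visit);
  return 0;
}
```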
no refresh retry"); return NULL; @@ -657,21 +678,8 @@ void* taosCacheRefresh(void *handle) { // refresh data in hash table if (elemInHash > 0) { - int64_t expiredTime = taosGetTimestampMs(); - - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - - __cache_wr_lock(pCacheObj); - while (taosHashIterNext(pIter)) { - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); - if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= expiredTime && T_REF_VAL_GET(pNode) <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } - } - - __cache_unlock(pCacheObj); - - taosHashDestroyIter(pIter); + int64_t now = taosGetTimestampMs(); + doCacheRefresh(pCacheObj, now, NULL); } taosTrashCanEmpty(pCacheObj, false); @@ -679,3 +687,12 @@ void* taosCacheRefresh(void *handle) { return NULL; } + +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { + if (pCacheObj == NULL) { + return; + } + + int64_t now = taosGetTimestampMs(); + doCacheRefresh(pCacheObj, now, fp); +} diff --git a/src/util/src/tfile.c b/src/util/src/tfile.c index eb7a2d5a66..92eeaef126 100644 --- a/src/util/src/tfile.c +++ b/src/util/src/tfile.c @@ -26,40 +26,51 @@ #include "os.h" -#define RANDOM_FILE_FAIL_FACTOR 5 +#ifdef TAOS_RANDOM_FILE_FAIL + +static int random_file_fail_factor = 20; + +void taosSetRandomFileFailFactor(int factor) +{ + random_file_fail_factor = factor; +} +#endif ssize_t taos_tread(int fd, void *buf, size_t count) { #ifdef TAOS_RANDOM_FILE_FAIL - if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { - errno = EIO; - return -1; + if (random_file_fail_factor > 0) { + if (rand() % random_file_fail_factor == 0) { + errno = EIO; + return -1; + } } #endif - return tread(fd, buf, count); } ssize_t taos_twrite(int fd, void *buf, size_t count) { #ifdef TAOS_RANDOM_FILE_FAIL - if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { - errno = EIO; - return -1; + if (random_file_fail_factor > 0) { + if (rand() % random_file_fail_factor == 0) { + errno = EIO; + return -1; + } } #endif - return twrite(fd, buf, count); } off_t taos_lseek(int fd, off_t offset, int whence) { #ifdef TAOS_RANDOM_FILE_FAIL - if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { - errno = EIO; - return -1; + if (random_file_fail_factor > 0) { + if (rand() % random_file_fail_factor == 0) { + errno = EIO; + return -1; + } } #endif - return lseek(fd, offset, whence); } diff --git a/src/query/src/tlosertree.c b/src/util/src/tlosertree.c similarity index 97% rename from src/query/src/tlosertree.c rename to src/util/src/tlosertree.c index 5d471bb927..fa7e4fc340 100644 --- a/src/query/src/tlosertree.c +++ b/src/util/src/tlosertree.c @@ -13,10 +13,10 @@ * along with this program. If not, see . */ +#include "tlosertree.h" #include "os.h" #include "taosmsg.h" -#include "tlosertree.h" -#include "queryLog.h" +#include "tulog.h" // set initial value for loser tree void tLoserTreeInit(SLoserTreeInfo* pTree) { @@ -45,7 +45,7 @@ uint32_t tLoserTreeCreate(SLoserTreeInfo** pTree, int32_t numOfEntries, void* pa *pTree = (SLoserTreeInfo*)calloc(1, sizeof(SLoserTreeInfo) + sizeof(SLoserTreeNode) * totalEntries); if ((*pTree) == NULL) { - qError("allocate memory for loser-tree failed. reason:%s", strerror(errno)); + uError("allocate memory for loser-tree failed. 
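The tfile.c change above replaces the fixed RANDOM_FILE_FAIL_FACTOR with a runtime-settable factor: a factor of zero disables injection, and taosSetRandomFileFailFactor() tunes how often the wrappers fake an EIO. The sketch below applies the same pattern to a generic read() wrapper; faulty_read() and set_fail_factor() are illustrative names, not the TDengine functions.

```c
/*
 * Illustration only: configurable fault injection around a read call, with a
 * process-wide failure factor that can be changed at run time (0 disables it).
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int fail_factor = 20;                 /* roughly 1-in-20 calls fails       */

void set_fail_factor(int factor) { fail_factor = factor; }

ssize_t faulty_read(int fd, void *buf, size_t count) {
  if (fail_factor > 0 && rand() % fail_factor == 0) {
    errno = EIO;                             /* simulate an I/O error             */
    return -1;
  }
  return read(fd, buf, count);
}

int main(void) {
  char buf[16];
  set_fail_factor(2);                        /* make failures frequent for a test */
  for (int i = 0; i < 5; i++) {
    ssize_t n = faulty_read(0, buf, 0);      /* zero-byte read: no input needed   */
    printf("attempt %d -> %zd%s\n", i, n, (n < 0) ? " (injected EIO)" : "");
  }
  return 0;
}
```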
reason:%s", strerror(errno)); return TSDB_CODE_QRY_OUT_OF_MEMORY; } diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index f4f7904968..d9abf0d7c3 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -93,16 +93,18 @@ void taosCloseQueue(taos_queue param) { void *taosAllocateQitem(int size) { STaosQnode *pNode = (STaosQnode *)calloc(sizeof(STaosQnode) + size, 1); + if (pNode == NULL) return NULL; + uTrace("item:%p, node:%p is allocated", pNode->item, pNode); return (void *)pNode->item; } void taosFreeQitem(void *param) { if (param == NULL) return; - uTrace("item:%p is freed", param); char *temp = (char *)param; temp -= sizeof(STaosQnode); + uTrace("item:%p, node:%p is freed", param, temp); free(temp); } diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index f3c0babe6b..aacc4a5487 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -5,6 +5,7 @@ * it under the terms of the GNU Affero General Public License, version 3 * or later ("AGPL"), as published by the Free Software Foundation. * + * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. @@ -238,7 +239,7 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, SSkipListNode *pNode) { // if the new key is greater than the maximum key of skip list, push back this node at the end of skip list char *newDatakey = SL_GET_NODE_KEY(pSkipList, pNode); - if (pSkipList->size == 0 || pSkipList->comparFn(pSkipList->lastKey, newDatakey) < 0) { + if (pSkipList->size == 0 || pSkipList->comparFn(SL_GET_SL_MAX_KEY(pSkipList), newDatakey) < 0) { return tSkipListPushBack(pSkipList, pNode); } @@ -498,7 +499,7 @@ void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode) { if (pSkipList->lock) { pthread_rwlock_wrlock(pSkipList->lock); } - + for (int32_t j = level - 1; j >= 0; --j) { SSkipListNode* prev = SL_GET_BACKWARD_POINTER(pNode, j); SSkipListNode* next = SL_GET_FORWARD_POINTER(pNode, j); @@ -699,8 +700,6 @@ SSkipListNode* tSkipListPushBack(SSkipList *pSkipList, SSkipListNode *pNode) { SL_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pNode; } - pSkipList->lastKey = SL_GET_NODE_KEY(pSkipList, pNode); - atomic_add_fetch_32(&pSkipList->size, 1); if (pSkipList->lock) { pthread_rwlock_unlock(pSkipList->lock); diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index bd2606d5fb..b225dfa36a 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -383,10 +383,15 @@ int taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { return -1; } - if (taosKeepTcpAlive(sockFd) < 0) return -1; + if (taosKeepTcpAlive(sockFd) < 0) { + uError("failed to set tcp server keep-alive option, 0x%x:%hu(%s)", ip, port, strerror(errno)); + close(sockFd); + return -1; + } if (listen(sockFd, 10) < 0) { uError("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno)); + close(sockFd); return -1; } diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 0050de3399..18c9ebf2e1 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -34,8 +34,7 @@ #define TSDB_VNODE_VERSION_CONTENT_LEN 31 -static int32_t tsOpennedVnodes; -static void *tsDnodeVnodesHash; +static SHashObj*tsDnodeVnodesHash; static void vnodeCleanUp(SVnodeObj *pVnode); static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg); static int32_t vnodeReadCfg(SVnodeObj *pVnode); @@ -47,8 +46,6 @@ static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index); 
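The skip-list change above drops the cached lastKey field and derives the maximum key from the tail node through SL_GET_SL_MAX_KEY, so the push-back fast path can no longer consult a stale cached value. The sketch below shows the same idea on a tiny sorted linked list; MiniList and LIST_MAX_KEY are simplified stand-ins for the skip-list structures.

```c
/*
 * Illustration only: read the maximum key from the tail of the structure
 * instead of caching it in a separate field that could go stale.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Node { int key; struct Node *next; } Node;
typedef struct { Node *head; Node *tail; } MiniList;

#define LIST_MAX_KEY(l) ((l)->tail->key)   /* derived from the tail, never cached */

static void pushBack(MiniList *l, int key) {
  Node *n = malloc(sizeof(*n));
  n->key = key;
  n->next = NULL;
  if (l->tail != NULL) l->tail->next = n; else l->head = n;
  l->tail = n;
}

int main(void) {
  MiniList list = {NULL, NULL};
  int keys[] = {3, 7, 9};                        /* inserted in ascending order   */
  for (int i = 0; i < 3; i++) pushBack(&list, keys[i]);
  printf("max key = %d\n", LIST_MAX_KEY(&list)); /* prints 9                      */
  for (Node *n = list.head; n != NULL;) { Node *t = n->next; free(n); n = t; }
  return 0;
}
```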
static void vnodeNotifyRole(void *ahandle, int8_t role); static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion); -static pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT; - #ifndef _SYNC tsync_h syncStart(const SSyncInfo *info) { return NULL; } int32_t syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype) { return 0; } @@ -58,25 +55,28 @@ int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; } void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {} #endif -static void vnodeInit() { +int32_t vnodeInitResources() { vnodeInitWriteFp(); vnodeInitReadFp(); tsDnodeVnodesHash = taosHashInit(TSDB_MAX_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); if (tsDnodeVnodesHash == NULL) { vError("failed to init vnode list"); + return TSDB_CODE_VND_OUT_OF_MEMORY; } + + return TSDB_CODE_SUCCESS; } void vnodeCleanupResources() { - taosHashCleanup(tsDnodeVnodesHash); - vnodeModuleInit = PTHREAD_ONCE_INIT; - tsDnodeVnodesHash = NULL; + if (tsDnodeVnodesHash != NULL) { + taosHashCleanup(tsDnodeVnodesHash); + tsDnodeVnodesHash = NULL; + } } int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { int32_t code; - pthread_once(&vnodeModuleInit, vnodeInit); SVnodeObj *pTemp = (SVnodeObj *)taosHashGet(tsDnodeVnodesHash, (const char *)&pVnodeCfg->cfg.vgId, sizeof(int32_t)); if (pTemp != NULL) { @@ -144,11 +144,6 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { } int32_t vnodeDrop(int32_t vgId) { - if (tsDnodeVnodesHash == NULL) { - vDebug("vgId:%d, failed to drop, vgId not exist", vgId); - return TSDB_CODE_VND_INVALID_VGROUP_ID; - } - SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); if (ppVnode == NULL || *ppVnode == NULL) { vDebug("vgId:%d, failed to drop, vgId not find", vgId); @@ -165,6 +160,19 @@ int32_t vnodeDrop(int32_t vgId) { int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) { SVnodeObj *pVnode = param; + + // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS + // cfgVersion can be corrected by status msg + if (pVnode->status != TAOS_VN_STATUS_READY) { + vDebug("vgId:%d, vnode is not ready, do alter operation later", pVnode->vgId); + return TSDB_CODE_SUCCESS; + } + + // the vnode may always fail to synchronize because of it in low cfgVersion + // so cannot use the following codes + // if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) + // return TSDB_CODE_VND_NOT_SYNCED; + pVnode->status = TAOS_VN_STATUS_UPDATING; int32_t code = vnodeSaveCfg(pVnodeCfg); @@ -187,7 +195,6 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) { int32_t vnodeOpen(int32_t vnode, char *rootDir) { char temp[TSDB_FILENAME_LEN]; - pthread_once(&vnodeModuleInit, vnodeInit); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); if (pVnode == NULL) { @@ -195,7 +202,6 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { return TAOS_SYSTEM_ERROR(errno); } - atomic_add_fetch_32(&tsOpennedVnodes, 1); atomic_add_fetch_32(&pVnode->refCount, 1); pVnode->vgId = vnode; @@ -366,13 +372,11 @@ void vnodeRelease(void *pVnodeRaw) { free(pVnode); - int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1); + int32_t count = taosHashGetSize(tsDnodeVnodesHash); vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count); } void *vnodeGetVnode(int32_t vgId) { - if (tsDnodeVnodesHash == NULL) return NULL; - SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); if (ppVnode == 
NULL || *ppVnode == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; @@ -417,10 +421,19 @@ void *vnodeGetWal(void *pVnode) { } static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) { - if (pVnode->status == TAOS_VN_STATUS_DELETING) return; + int64_t totalStorage = 0; + int64_t compStorage = 0; + int64_t pointsWritten = 0; + + if (pVnode->status != TAOS_VN_STATUS_READY) return; if (pStatus->openVnodes >= TSDB_MAX_VNODES) return; - int64_t totalStorage, compStorage, pointsWritten = 0; - tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); + + // still need report status when unsynced + if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) { + } else if (pVnode->tsdb == NULL) { + } else { + tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); + } SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++]; pLoad->vgId = htonl(pVnode->vgId); @@ -434,8 +447,6 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) { } int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { - if (tsDnodeVnodesHash == NULL) return TSDB_CODE_SUCCESS; - SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash); while (taosHashIterNext(pIter)) { SVnodeObj **pVnode = taosHashIterGet(pIter); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 354caf2af5..f054ae3904 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -46,9 +46,9 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { return TSDB_CODE_VND_MSG_NOT_PROCESSED; } - if (pVnode->status == TAOS_VN_STATUS_DELETING || pVnode->status == TAOS_VN_STATUS_CLOSING) { + if (pVnode->status != TAOS_VN_STATUS_READY) { vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[msgType], pVnode->status); - return TSDB_CODE_VND_INVALID_VGROUP_ID; + return TSDB_CODE_VND_INVALID_STATUS; } // TODO: Later, let slave to support query @@ -61,7 +61,7 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { } static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { - void * pCont = pReadMsg->pCont; + void *pCont = pReadMsg->pCont; int32_t contLen = pReadMsg->contLen; SRspRet *pRet = &pReadMsg->rspRet; @@ -74,19 +74,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { killQueryMsg->free = htons(killQueryMsg->free); killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle); - void* handle = NULL; - if ((void**) killQueryMsg->qhandle != NULL) { - handle = *(void**) killQueryMsg->qhandle; - } - - vWarn("QInfo:%p connection %p broken, kill query", handle, pReadMsg->rpcMsg.handle); + vWarn("QInfo:%p connection %p broken, kill query", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1); - void** qhandle = qAcquireQInfo(pVnode->qMgmt, (void**) killQueryMsg->qhandle); + void** qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) killQueryMsg->qhandle); if (qhandle == NULL || *qhandle == NULL) { vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); } else { - assert(qhandle == (void**) killQueryMsg->qhandle); + assert(*qhandle == (void*) killQueryMsg->qhandle); qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true); } @@ -94,10 +89,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } int32_t code = TSDB_CODE_SUCCESS; - qinfo_t pQInfo = NULL; void** handle = NULL; if 
(contLen != 0) { + qinfo_t pQInfo = NULL; code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo); SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp)); @@ -110,8 +105,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { - // add lock here - handle = qRegisterQInfo(pVnode->qMgmt, pQInfo); + handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); if (handle == NULL) { // failed to register qhandle pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; @@ -119,39 +113,41 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { qKillQuery(pQInfo); } else { assert(*handle == pQInfo); - pRsp->qhandle = htobe64((uint64_t) (handle)); + pRsp->qhandle = htobe64((uint64_t) pQInfo); } - if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { - vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle); + pQInfo = NULL; + if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; // NOTE: there two refcount, needs to kill twice // query has not been put into qhandle pool, kill it directly. - qKillQuery(pQInfo); + qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); return pRsp->code; } } else { assert(pQInfo == NULL); } - + if (handle != NULL) { + dnodePutItemIntoReadQueue(pVnode, *handle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + } vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo); } else { assert(pCont != NULL); - pQInfo = *(void**)(pCont); - handle = pCont; - code = TSDB_CODE_VND_ACTION_IN_PROGRESS; - - vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, pQInfo); - } - - if (pQInfo != NULL) { - qTableQuery(pQInfo); // do execute query - assert(handle != NULL); + handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); + if (handle == NULL) { + vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); + code = TSDB_CODE_QRY_INVALID_QHANDLE; + } else { + vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, (void*) pCont); + code = TSDB_CODE_VND_ACTION_IN_PROGRESS; + qTableQuery(*handle); // do execute query + } qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } - return code; } @@ -160,57 +156,64 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { SRspRet *pRet = &pReadMsg->rspRet; SRetrieveTableMsg *pRetrieve = pCont; - void **pQInfo = (void*) htobe64(pRetrieve->qhandle); + pRetrieve->qhandle = htobe64(pRetrieve->qhandle); pRetrieve->free = htons(pRetrieve->free); - vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *pQInfo); + vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *(void**) pRetrieve->qhandle); memset(pRet, 0, sizeof(SRspRet)); - int32_t ret = 0; - void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo); - if (handle == NULL || handle != pQInfo) { - ret = TSDB_CODE_QRY_INVALID_QHANDLE; + int32_t code = TSDB_CODE_SUCCESS; + void** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qhandle); + if (handle == NULL || (*handle) != (void*) pRetrieve->qhandle) { + code = 
TSDB_CODE_QRY_INVALID_QHANDLE; + vDebug("vgId:%d, invalid qhandle in fetch result, QInfo:%p", pVnode->vgId, (void*) pRetrieve->qhandle); + + pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + pRet->len = sizeof(SRetrieveTableRsp); + + memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp* pRsp = pRet->rsp; + pRsp->numOfRows = 0; + pRsp->useconds = 0; + pRsp->completed = true; + + return code; } if (pRetrieve->free == 1) { - if (ret == TSDB_CODE_SUCCESS) { - vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); + vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); - pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); - pRet->len = sizeof(SRetrieveTableRsp); + pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + pRet->len = sizeof(SRetrieveTableRsp); - memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); - SRetrieveTableRsp* pRsp = pRet->rsp; - pRsp->numOfRows = 0; - pRsp->completed = true; - pRsp->useconds = 0; - } else { // todo handle error - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); - } - return ret; + memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp* pRsp = pRet->rsp; + pRsp->numOfRows = 0; + pRsp->completed = true; + pRsp->useconds = 0; + + return code; } - int32_t code = qRetrieveQueryResultInfo(*pQInfo); - if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) { - //TODO + bool freeHandle = true; + code = qRetrieveQueryResultInfo(*handle); + if (code != TSDB_CODE_SUCCESS) { + //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); - - } else { - // todo check code and handle error in build result set - code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len); - - if (qHasMoreResultsToRetrieve(*handle)) { - dnodePutItemIntoReadQueue(pVnode, handle); - pRet->qhandle = handle; - code = TSDB_CODE_SUCCESS; - } else { // no further execution invoked, release the ref to vnode - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); + } else { // if failed to dump result, free qhandle immediately + if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len)) == TSDB_CODE_SUCCESS) { + if (qHasMoreResultsToRetrieve(*handle)) { + dnodePutItemIntoReadQueue(pVnode, *handle); + pRet->qhandle = *handle; + freeHandle = false; + } } } + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); return code; } @@ -225,4 +228,4 @@ int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) { vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle); return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg)); -} \ No newline at end of file +} diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 171557acb6..5ed5e747f2 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -49,19 +49,26 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { SVnodeObj *pVnode = (SVnodeObj *)param1; SWalHead *pHead = param2; - if (vnodeProcessWriteMsgFp[pHead->msgType] == NULL) + if (vnodeProcessWriteMsgFp[pHead->msgType] == NULL) { + vDebug("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[pHead->msgType]); 
return TSDB_CODE_VND_MSG_NOT_PROCESSED; + } if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { + vDebug("vgId:%d, msgType:%s not processed, no write auth", pVnode->vgId, taosMsg[pHead->msgType]); return TSDB_CODE_VND_NO_WRITE_AUTH; } if (pHead->version == 0) { // from client or CQ - if (pVnode->status != TAOS_VN_STATUS_READY) - return TSDB_CODE_VND_INVALID_VGROUP_ID; // it may be in deleting or closing state + if (pVnode->status != TAOS_VN_STATUS_READY) { + vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[pHead->msgType], pVnode->status); + return TSDB_CODE_VND_INVALID_STATUS; // it may be in deleting or closing state + } - if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) + if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) { + vDebug("vgId:%d, msgType:%s not processed, replica:%d role:%d", pVnode->vgId, taosMsg[pHead->msgType], pVnode->syncCfg.replica, pVnode->role); return TSDB_CODE_RPC_NOT_READY; + } // assign version pVnode->version++; @@ -89,21 +96,25 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { return syncCode; } +void vnodeConfirmForward(void *param, uint64_t version, int32_t code) { + SVnodeObj *pVnode = (SVnodeObj *)param; + syncConfirmForward(pVnode->sync, version, code); +} + static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) { int32_t code = TSDB_CODE_SUCCESS; - // save insert result into item - vTrace("vgId:%d, submit msg is processed", pVnode->vgId); - - pRet->len = sizeof(SShellSubmitRspMsg); - pRet->rsp = rpcMallocCont(pRet->len); - SShellSubmitRspMsg *pRsp = pRet->rsp; + + // save insert result into item + SShellSubmitRspMsg *pRsp = NULL; + if (pRet) { + pRet->len = sizeof(SShellSubmitRspMsg); + pRet->rsp = rpcMallocCont(pRet->len); + pRsp = pRet->rsp; + } + if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) code = terrno; - pRsp->numOfFailedBlocks = 0; //TODO - //pRet->len += pRsp->numOfFailedBlocks * sizeof(SShellSubmitRspBlock); //TODO - pRsp->code = 0; - pRsp->numOfRows = htonl(1); return code; } @@ -158,7 +169,7 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet } static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) { - if (tsdbUpdateTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) { + if (tsdbUpdateTableTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) { return terrno; } return TSDB_CODE_SUCCESS; diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c index e079653ab3..94a0fdc956 100644 --- a/src/wal/src/walMain.c +++ b/src/wal/src/walMain.c @@ -28,6 +28,7 @@ #include "taoserror.h" #include "twal.h" #include "tqueue.h" +#include "tfile.h" #define walPrefix "wal" @@ -180,7 +181,7 @@ int walWrite(void *handle, SWalHead *pHead) { taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead)); int contLen = pHead->len + sizeof(SWalHead); - if(write(pWal->fd, pHead, contLen) != contLen) { + if(twrite(pWal->fd, pHead, contLen) != contLen) { wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); } else { @@ -325,7 +326,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { wDebug("wal:%s, start to restore", name); while (1) { - int ret = read(fd, pHead, sizeof(SWalHead)); + int ret = tread(fd, pHead, sizeof(SWalHead)); if ( ret == 0) break; if (ret != sizeof(SWalHead)) { @@ -340,7 +341,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite 
writeFp) { break; } - ret = read(fd, pHead->cont, pHead->len); + ret = tread(fd, pHead->cont, pHead->len); if ( ret != pHead->len) { wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret); terrno = TAOS_SYSTEM_ERROR(errno); diff --git a/tests/pytest/alter/alter_table_crash.py b/tests/pytest/alter/alter_table_crash.py index aefe9ff26e..d1af022e35 100644 --- a/tests/pytest/alter/alter_table_crash.py +++ b/tests/pytest/alter/alter_table_crash.py @@ -78,5 +78,6 @@ class TDTestCase: tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/client/client.py b/tests/pytest/client/client.py index 21df7e6a86..6ac12c6795 100644 --- a/tests/pytest/client/client.py +++ b/tests/pytest/client/client.py @@ -40,6 +40,11 @@ class TDTestCase: ret = tdSql.query('select server_status() as result') tdSql.checkData(0, 0, 1) + ret = tdSql.query('show dnodes') + + ret = tdSql.execute('alter dnode "%s" debugFlag 135' % tdSql.getData(0,1)) + tdLog.info('alter dnode "%s" debugFlag 135 -> ret: %d' % (tdSql.getData(0, 1), ret)) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py index cc41fd5e7d..9af72af471 100755 --- a/tests/pytest/crash_gen.py +++ b/tests/pytest/crash_gen.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3.7 +# -----!/usr/bin/python3.7 ################################################################### # Copyright (c) 2016 by TAOS Technologies, Inc. # All rights reserved. @@ -11,72 +11,83 @@ ################################################################### # -*- coding: utf-8 -*- -from __future__ import annotations # For type hinting before definition, ref: https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel +# For type hinting before definition, ref: +# https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel +from __future__ import annotations +import taos +import crash_gen +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.log import * +from queue import Queue, Empty +from typing import IO +from typing import Set +from typing import Dict +from typing import List +from requests.auth import HTTPBasicAuth +import textwrap +import datetime +import logging +import time +import random +import threading +import requests +import copy +import argparse +import getopt import sys import os +import io +import signal import traceback # Require Python 3 if sys.version_info[0] < 3: raise Exception("Must be using Python 3") -import getopt -import argparse -import copy -import threading -import random -import time -import logging -import datetime -import textwrap - -from typing import List -from typing import Dict -from typing import Set - -from util.log import * -from util.dnodes import * -from util.cases import * -from util.sql import * - -import crash_gen -import taos - -# Global variables, tried to keep a small number. +# Global variables, tried to keep a small number. 
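The reordered import block above keeps `from __future__ import annotations`, which the original comment explains is there so type hints can name classes defined later in the file (WorkerThread's constructor, for instance, is annotated with ThreadPool and ThreadCoordinator before those classes appear). A minimal, self-contained sketch of that pattern follows; the Node class is invented for illustration and is not part of crash_gen.py:

    from __future__ import annotations
    from typing import Optional


    class Node:
        """Tiny linked-list node; without the __future__ import, the hints below
        would raise NameError because Node is not bound yet while the class body runs."""

        def __init__(self, value: int) -> None:
            self.value = value
            self.next: Optional[Node] = None

        def append(self, value: int) -> Node:  # return type names the enclosing class
            self.next = Node(value)
            return self.next


    if __name__ == "__main__":
        head = Node(1)
        tail = head.append(2)
        print(head.value, tail.value)  # prints: 1 2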
# Command-line/Environment Configurations, will set a bit later # ConfigNameSpace = argparse.Namespace -gConfig = argparse.Namespace() # Dummy value, will be replaced later +gConfig = argparse.Namespace() # Dummy value, will be replaced later logger = None -def runThread(wt: WorkerThread): + +def runThread(wt: WorkerThread): wt.run() + class CrashGenError(Exception): def __init__(self, msg=None, errno=None): - self.msg = msg + self.msg = msg self.errno = errno - + def __str__(self): return self.msg + class WorkerThread: - def __init__(self, pool: ThreadPool, tid, - tc: ThreadCoordinator, - # te: TaskExecutor, - ): # note: main thread context! - # self._curStep = -1 + def __init__(self, pool: ThreadPool, tid, + tc: ThreadCoordinator, + # te: TaskExecutor, + ): # note: main thread context! + # self._curStep = -1 self._pool = pool - self._tid = tid - self._tc = tc # type: ThreadCoordinator + self._tid = tid + self._tc = tc # type: ThreadCoordinator # self.threadIdent = threading.get_ident() self._thread = threading.Thread(target=runThread, args=(self,)) self._stepGate = threading.Event() # Let us have a DB connection of our own - if ( gConfig.per_thread_db_connection ): # type: ignore - self._dbConn = DbConn() + if (gConfig.per_thread_db_connection): # type: ignore + # print("connector_type = {}".format(gConfig.connector_type)) + self._dbConn = DbConn.createNative() if ( + gConfig.connector_type == 'native') else DbConn.createRest() + + self._dbInUse = False # if "use db" was executed already def logDebug(self, msg): logger.debug(" TRD[{}] {}".format(self._tid, msg)) @@ -84,201 +95,296 @@ class WorkerThread: def logInfo(self, msg): logger.info(" TRD[{}] {}".format(self._tid, msg)) - + def dbInUse(self): + return self._dbInUse + + def useDb(self): + if (not self._dbInUse): + self.execSql("use db") + self._dbInUse = True + def getTaskExecutor(self): - return self._tc.getTaskExecutor() + return self._tc.getTaskExecutor() def start(self): self._thread.start() # AFTER the thread is recorded - def run(self): + def run(self): # initialization after thread starts, in the thread context # self.isSleeping = False logger.info("Starting to run thread: {}".format(self._tid)) - if ( gConfig.per_thread_db_connection ): # type: ignore + if (gConfig.per_thread_db_connection): # type: ignore + logger.debug("Worker thread openning database connection") self._dbConn.open() - self._doTaskLoop() - + self._doTaskLoop() + # clean up - if ( gConfig.per_thread_db_connection ): # type: ignore + if (gConfig.per_thread_db_connection): # type: ignore self._dbConn.close() - def _doTaskLoop(self) : + def _doTaskLoop(self): # while self._curStep < self._pool.maxSteps: # tc = ThreadCoordinator(None) - while True: - tc = self._tc # Thread Coordinator, the overall master + while True: + tc = self._tc # Thread Coordinator, the overall master tc.crossStepBarrier() # shared barrier first, INCLUDING the last one - logger.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) + logger.debug( + "[TRD] Worker thread [{}] exited barrier...".format( + self._tid)) self.crossStepGate() # then per-thread gate, after being tapped - logger.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) + logger.debug( + "[TRD] Worker thread [{}] exited step gate...".format( + self._tid)) if not self._tc.isRunning(): - logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") + logger.debug( + "[TRD] Thread Coordinator not running any more, worker thread now stopping...") break - 
logger.debug("[TRD] Worker thread [{}] about to fetch task".format(self._tid)) + # Fetch a task from the Thread Coordinator + logger.debug( + "[TRD] Worker thread [{}] about to fetch task".format( + self._tid)) task = tc.fetchTask() - logger.debug("[TRD] Worker thread [{}] about to execute task: {}".format(self._tid, task.__class__.__name__)) + + # Execute such a task + logger.debug( + "[TRD] Worker thread [{}] about to execute task: {}".format( + self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) - logger.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) - - def verifyThreadSelf(self): # ensure we are called by this own thread - if ( threading.get_ident() != self._thread.ident ): + logger.debug( + "[TRD] Worker thread [{}] finished executing task".format( + self._tid)) + + self._dbInUse = False # there may be changes between steps + + def verifyThreadSelf(self): # ensure we are called by this own thread + if (threading.get_ident() != self._thread.ident): raise RuntimeError("Unexpectly called from other threads") - def verifyThreadMain(self): # ensure we are called by the main thread - if ( threading.get_ident() != threading.main_thread().ident ): + def verifyThreadMain(self): # ensure we are called by the main thread + if (threading.get_ident() != threading.main_thread().ident): raise RuntimeError("Unexpectly called from other threads") def verifyThreadAlive(self): - if ( not self._thread.is_alive() ): + if (not self._thread.is_alive()): raise RuntimeError("Unexpected dead thread") # A gate is different from a barrier in that a thread needs to be "tapped" def crossStepGate(self): self.verifyThreadAlive() - self.verifyThreadSelf() # only allowed by ourselves - + self.verifyThreadSelf() # only allowed by ourselves + # Wait again at the "gate", waiting to be "tapped" - logger.debug("[TRD] Worker thread {} about to cross the step gate".format(self._tid)) - self._stepGate.wait() + logger.debug( + "[TRD] Worker thread {} about to cross the step gate".format( + self._tid)) + self._stepGate.wait() self._stepGate.clear() - + # self._curStep += 1 # off to a new step... - def tapStepGate(self): # give it a tap, release the thread waiting there + def tapStepGate(self): # give it a tap, release the thread waiting there self.verifyThreadAlive() - self.verifyThreadMain() # only allowed for main thread - - logger.debug("[TRD] Tapping worker thread {}".format(self._tid)) - self._stepGate.set() # wake up! - time.sleep(0) # let the released thread run a bit + self.verifyThreadMain() # only allowed for main thread - def execSql(self, sql): # TODO: expose DbConn directly - if ( gConfig.per_thread_db_connection ): - return self._dbConn.execute(sql) + logger.debug("[TRD] Tapping worker thread {}".format(self._tid)) + self._stepGate.set() # wake up! 
+ time.sleep(0) # let the released thread run a bit + + def execSql(self, sql): # TODO: expose DbConn directly + if (gConfig.per_thread_db_connection): + return self._dbConn.execute(sql) else: return self._tc.getDbManager().getDbConn().execute(sql) + def querySql(self, sql): # TODO: expose DbConn directly + if (gConfig.per_thread_db_connection): + return self._dbConn.query(sql) + else: + return self._tc.getDbManager().getDbConn().query(sql) + + def getQueryResult(self): + if (gConfig.per_thread_db_connection): + return self._dbConn.getQueryResult() + else: + return self._tc.getDbManager().getDbConn().getQueryResult() + def getDbConn(self): - if ( gConfig.per_thread_db_connection ): - return self._dbConn + if (gConfig.per_thread_db_connection): + return self._dbConn else: return self._tc.getDbManager().getDbConn() # def querySql(self, sql): # not "execute", since we are out side the DB context # if ( gConfig.per_thread_db_connection ): - # return self._dbConn.query(sql) + # return self._dbConn.query(sql) # else: # return self._tc.getDbState().getDbConn().query(sql) +# The coordinator of all worker threads, mostly running in main thread + + class ThreadCoordinator: - def __init__(self, pool, dbManager): - self._curStep = -1 # first step is 0 + def __init__(self, pool: ThreadPool, dbManager): + self._curStep = -1 # first step is 0 self._pool = pool # self._wd = wd - self._te = None # prepare for every new step + self._te = None # prepare for every new step self._dbManager = dbManager - self._executedTasks: List[Task] = [] # in a given step - self._lock = threading.RLock() # sync access for a few things + self._executedTasks: List[Task] = [] # in a given step + self._lock = threading.RLock() # sync access for a few things - self._stepBarrier = threading.Barrier(self._pool.numThreads + 1) # one barrier for all threads + self._stepBarrier = threading.Barrier( + self._pool.numThreads + 1) # one barrier for all threads self._execStats = ExecutionStats() + self._runStatus = MainExec.STATUS_RUNNING def getTaskExecutor(self): return self._te - def getDbManager(self) -> DbManager : + def getDbManager(self) -> DbManager: return self._dbManager def crossStepBarrier(self): self._stepBarrier.wait() - def run(self): + def requestToStop(self): + self._runStatus = MainExec.STATUS_STOPPING + self._execStats.registerFailure("User Interruption") + + def run(self): self._pool.createAndStartThreads(self) # Coordinate all threads step by step - self._curStep = -1 # not started yet - maxSteps = gConfig.max_steps # type: ignore - self._execStats.startExec() # start the stop watch - failed = False - while(self._curStep < maxSteps-1 and not failed): # maxStep==10, last curStep should be 9 - if not gConfig.debug: - print(".", end="", flush=True) # print this only if we are not in debug mode + self._curStep = -1 # not started yet + maxSteps = gConfig.max_steps # type: ignore + self._execStats.startExec() # start the stop watch + transitionFailed = False + hasAbortedTask = False + while(self._curStep < maxSteps - 1 and + (not transitionFailed) and + (self._runStatus == MainExec.STATUS_RUNNING) and + (not hasAbortedTask)): # maxStep==10, last curStep should be 9 + + if not gConfig.debug: + # print this only if we are not in debug mode + print(".", end="", flush=True) logger.debug("[TRD] Main thread going to sleep") - # Now ready to enter a step - self.crossStepBarrier() # let other threads go past the pool barrier, but wait at the thread gate - self._stepBarrier.reset() # Other worker threads should now be at the "gate" + 
# Now main thread (that's us) is ready to enter a step + # let other threads go past the pool barrier, but wait at the + # thread gate + self.crossStepBarrier() + self._stepBarrier.reset() # Other worker threads should now be at the "gate" # At this point, all threads should be pass the overall "barrier" and before the per-thread "gate" - try: - self._dbManager.getStateMachine().transition(self._executedTasks) # at end of step, transiton the DB state - except taos.error.ProgrammingError as err: - if ( err.msg == 'network unavailable' ): # broken DB connection - logger.info("DB connection broken, execution failed") - traceback.print_stack() - failed = True - self._te = None # Not running any more - self._execStats.registerFailure("Broken DB Connection") - # continue # don't do that, need to tap all threads at end, and maybe signal them to stop - else: - raise - finally: - pass - - self.resetExecutedTasks() # clear the tasks after we are done + # We use this period to do house keeping work, when all worker + # threads are QUIET. + hasAbortedTask = False + for task in self._executedTasks: + if task.isAborted(): + print("Task aborted: {}".format(task)) + hasAbortedTask = True + break + + if hasAbortedTask: # do transition only if tasks are error free + self._execStats.registerFailure("Aborted Task Encountered") + else: + try: + sm = self._dbManager.getStateMachine() + logger.debug("[STT] starting transitions") + # at end of step, transiton the DB state + sm.transition(self._executedTasks) + logger.debug("[STT] transition ended") + # Due to limitation (or maybe not) of the Python library, + # we cannot share connections across threads + if sm.hasDatabase(): + for t in self._pool.threadList: + logger.debug("[DB] use db for all worker threads") + t.useDb() + # t.execSql("use db") # main thread executing "use + # db" on behalf of every worker thread + except taos.error.ProgrammingError as err: + if (err.msg == 'network unavailable'): # broken DB connection + logger.info("DB connection broken, execution failed") + traceback.print_stack() + transitionFailed = True + self._te = None # Not running any more + self._execStats.registerFailure("Broken DB Connection") + # continue # don't do that, need to tap all threads at + # end, and maybe signal them to stop + else: + raise + # finally: + # pass + + self.resetExecutedTasks() # clear the tasks after we are done # Get ready for next step logger.debug("<-- Step {} finished".format(self._curStep)) - self._curStep += 1 # we are about to get into next step. TODO: race condition here! - logger.debug("\r\n--> Step {} starts with main thread waking up".format(self._curStep)) # Now not all threads had time to go to sleep + self._curStep += 1 # we are about to get into next step. TODO: race condition here! 
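The run() loop above coordinates worker threads in lock steps: everyone meets at a shared threading.Barrier, the main thread does its housekeeping (task verification, state transitions), and then each worker is tapped through its own per-thread Event gate before executing one more task. A stripped-down, self-contained illustration of that barrier-plus-gate scheme is sketched below; the worker function, step count, and the simple `running` flag are invented for the sketch and are not crash_gen's real classes:

    import threading
    import time

    NUM_WORKERS = 3
    NUM_STEPS = 4

    barrier = threading.Barrier(NUM_WORKERS + 1)            # all workers plus the main thread
    gates = [threading.Event() for _ in range(NUM_WORKERS)]  # one per-thread "gate"
    running = True                                            # simple shutdown flag for the sketch


    def worker(tid: int) -> None:
        while True:
            barrier.wait()        # every thread meets here at the end of a step
            gates[tid].wait()     # then waits until the main thread "taps" it
            gates[tid].clear()
            if not running:
                break
            print("worker {} executing one task".format(tid))


    threads = [threading.Thread(target=worker, args=(i,)) for i in range(NUM_WORKERS)]
    for t in threads:
        t.start()

    for step in range(NUM_STEPS):
        barrier.wait()            # wait for all workers to reach the barrier
        print("-- main thread housekeeping for step {} --".format(step))
        for g in gates:           # tap every worker so it runs one more task
            g.set()
        time.sleep(0)             # yield, letting the released threads run a bit

    barrier.wait()                # one last round so workers can observe shutdown
    running = False
    for g in gates:
        g.set()
    for t in threads:
        t.join()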
+ # Now not all threads had time to go to sleep + logger.debug( + "\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # A new TE for the new step - if not failed: # only if not failed + if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) - logger.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format(self._curStep)) # Now not all threads had time to go to sleep + logger.debug( + "[TRD] Main thread waking up at step {}, tapping worker threads".format( + self._curStep)) # Now not all threads had time to go to sleep + # Worker threads will wake up at this point, and each execute it's + # own task self.tapAllThreads() logger.debug("Main thread ready to finish up...") - if not failed: # only in regular situations - self.crossStepBarrier() # Cross it one last time, after all threads finish + if not transitionFailed: # only in regular situations + self.crossStepBarrier() # Cross it one last time, after all threads finish self._stepBarrier.reset() logger.debug("Main thread in exclusive zone...") - self._te = None # No more executor, time to end + self._te = None # No more executor, time to end logger.debug("Main thread tapping all threads one last time...") - self.tapAllThreads() # Let the threads run one last time + self.tapAllThreads() # Let the threads run one last time logger.debug("Main thread joining all threads") - self._pool.joinAll() # Get all threads to finish - logger.info("All worker thread finished") + self._pool.joinAll() # Get all threads to finish + logger.info("\nAll worker threads finished") self._execStats.endExec() - def logStats(self): - self._execStats.logStats() + def printStats(self): + self._execStats.printStats() - def tapAllThreads(self): # in a deterministic manner + def isFailed(self): + return self._execStats.isFailed() + + def getExecStats(self): + return self._execStats + + def tapAllThreads(self): # in a deterministic manner wakeSeq = [] - for i in range(self._pool.numThreads): # generate a random sequence - if Dice.throw(2) == 1 : + for i in range(self._pool.numThreads): # generate a random sequence + if Dice.throw(2) == 1: wakeSeq.append(i) else: wakeSeq.insert(0, i) - logger.debug("[TRD] Main thread waking up worker thread: {}".format(str(wakeSeq))) + logger.debug( + "[TRD] Main thread waking up worker threads: {}".format( + str(wakeSeq))) # TODO: set dice seed to a deterministic value for i in wakeSeq: - self._pool.threadList[i].tapStepGate() # TODO: maybe a bit too deep?! - time.sleep(0) # yield + # TODO: maybe a bit too deep?! + self._pool.threadList[i].tapStepGate() + time.sleep(0) # yield def isRunning(self): - return self._te != None + return self._te is not None - def fetchTask(self) -> Task : - if ( not self.isRunning() ): # no task + def fetchTask(self) -> Task: + if (not self.isRunning()): # no task raise RuntimeError("Cannot fetch task when not running") # return self._wd.pickTask() # Alternatively, let's ask the DbState for the appropriate task @@ -289,31 +395,36 @@ class ThreadCoordinator: # logger.debug(" (dice:{}/{}) ".format(i, nTasks)) # # return copy.copy(tasks[i]) # Needs a fresh copy, to save execution results, etc. # return tasks[i].clone() # TODO: still necessary? 
- taskType = self.getDbManager().getStateMachine().pickTaskType() # pick a task type for current state - return taskType(self.getDbManager(), self._execStats) # create a task from it + # pick a task type for current state + taskType = self.getDbManager().getStateMachine().pickTaskType() + return taskType( + self.getDbManager(), + self._execStats) # create a task from it def resetExecutedTasks(self): - self._executedTasks = [] # should be under single thread + self._executedTasks = [] # should be under single thread def saveExecutedTask(self, task): with self._lock: self._executedTasks.append(task) # We define a class to run a number of threads in locking steps. + + class ThreadPool: def __init__(self, numThreads, maxSteps): self.numThreads = numThreads self.maxSteps = maxSteps # Internal class variables self.curStep = 0 - self.threadList = [] - + self.threadList = [] # type: List[WorkerThread] + # starting to run all the threads, in locking steps def createAndStartThreads(self, tc: ThreadCoordinator): - for tid in range(0, self.numThreads): # Create the threads - workerThread = WorkerThread(self, tid, tc) + for tid in range(0, self.numThreads): # Create the threads + workerThread = WorkerThread(self, tid, tc) self.threadList.append(workerThread) - workerThread.start() # start, but should block immediately before step 0 + workerThread.start() # start, but should block immediately before step 0 def joinAll(self): for workerThread in self.threadList: @@ -322,21 +433,24 @@ class ThreadPool: # A queue of continguous POSITIVE integers, used by DbManager to generate continuous numbers # for new table names + + class LinearQueue(): def __init__(self): self.firstIndex = 1 # 1st ever element self.lastIndex = 0 - self._lock = threading.RLock() # our functions may call each other - self.inUse = set() # the indexes that are in use right now + self._lock = threading.RLock() # our functions may call each other + self.inUse = set() # the indexes that are in use right now def toText(self): - return "[{}..{}], in use: {}".format(self.firstIndex, self.lastIndex, self.inUse) + return "[{}..{}], in use: {}".format( + self.firstIndex, self.lastIndex, self.inUse) # Push (add new element, largest) to the tail, and mark it in use - def push(self): + def push(self): with self._lock: - # if ( self.isEmpty() ): - # self.lastIndex = self.firstIndex + # if ( self.isEmpty() ): + # self.lastIndex = self.firstIndex # return self.firstIndex # Otherwise we have something self.lastIndex += 1 @@ -346,12 +460,12 @@ class LinearQueue(): def pop(self): with self._lock: - if ( self.isEmpty() ): - # raise RuntimeError("Cannot pop an empty queue") - return False # TODO: None? - + if (self.isEmpty()): + # raise RuntimeError("Cannot pop an empty queue") + return False # TODO: None? + index = self.firstIndex - if ( index in self.inUse ): + if (index in self.inUse): return False self.firstIndex += 1 @@ -369,117 +483,322 @@ class LinearQueue(): def allocate(self, i): with self._lock: # logger.debug("LQ allocating item {}".format(i)) - if ( i in self.inUse ): - raise RuntimeError("Cannot re-use same index in queue: {}".format(i)) + if (i in self.inUse): + raise RuntimeError( + "Cannot re-use same index in queue: {}".format(i)) self.inUse.add(i) def release(self, i): with self._lock: # logger.debug("LQ releasing item {}".format(i)) - self.inUse.remove(i) # KeyError possible, TODO: why? + self.inUse.remove(i) # KeyError possible, TODO: why? 
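LinearQueue protects its bookkeeping with a threading.RLock rather than a plain Lock because, as its own comment notes, its methods may call each other while the lock is already held (pickAndAllocate calls allocate, for example). A tiny self-contained example of why the re-entrant variant matters; the Counter class is purely illustrative:

    import threading


    class Counter:
        """With a plain threading.Lock, bump_twice() would deadlock on the nested acquire."""

        def __init__(self) -> None:
            self._lock = threading.RLock()  # re-entrant: the owning thread may acquire it again
            self.value = 0

        def bump(self) -> None:
            with self._lock:
                self.value += 1

        def bump_twice(self) -> None:
            with self._lock:     # lock already held here...
                self.bump()      # ...and re-acquired inside bump(); RLock allows this
                self.bump()


    if __name__ == "__main__":
        c = Counter()
        c.bump_twice()
        print(c.value)  # prints: 2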
def size(self): return self.lastIndex + 1 - self.firstIndex def pickAndAllocate(self): - if ( self.isEmpty() ): + if (self.isEmpty()): return None with self._lock: - cnt = 0 # counting the interations + cnt = 0 # counting the interations while True: cnt += 1 - if ( cnt > self.size()*10 ): # 10x iteration already + if (cnt > self.size() * 10): # 10x iteration already # raise RuntimeError("Failed to allocate LinearQueue element") return None - ret = Dice.throwRange(self.firstIndex, self.lastIndex+1) - if ( not ret in self.inUse ): + ret = Dice.throwRange(self.firstIndex, self.lastIndex + 1) + if (ret not in self.inUse): self.allocate(ret) return ret + class DbConn: + TYPE_NATIVE = "native-c" + TYPE_REST = "rest-api" + TYPE_INVALID = "invalid" + + @classmethod + def create(cls, connType): + if connType == cls.TYPE_NATIVE: + return DbConnNative() + elif connType == cls.TYPE_REST: + return DbConnRest() + else: + raise RuntimeError( + "Unexpected connection type: {}".format(connType)) + + @classmethod + def createNative(cls): + return cls.create(cls.TYPE_NATIVE) + + @classmethod + def createRest(cls): + return cls.create(cls.TYPE_REST) + def __init__(self): - self._conn = None - self._cursor = None self.isOpen = False - - def open(self): # Open connection - if ( self.isOpen ): + self._type = self.TYPE_INVALID + + def open(self): + if (self.isOpen): raise RuntimeError("Cannot re-open an existing DB connection") - cfgPath = "../../build/test/cfg" - self._conn = taos.connect(host="127.0.0.1", config=cfgPath) # TODO: make configurable + # below implemented by child classes + self.openByType() + + logger.debug( + "[DB] data connection opened, type = {}".format( + self._type)) + self.isOpen = True + + def resetDb(self): # reset the whole database, etc. + if (not self.isOpen): + raise RuntimeError( + "Cannot reset database until connection is open") + # self._tdSql.prepare() # Recreate database, etc. 
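The DbConn base class above exposes create()/createNative()/createRest() classmethods so that callers pick a connector flavor by name and receive the matching subclass. A minimal sketch of that classmethod-factory idiom under invented names (Transport and its subclasses are not the tool's real classes):

    class Transport:
        TYPE_NATIVE = "native-c"
        TYPE_REST = "rest-api"

        @classmethod
        def create(cls, conn_type: str) -> "Transport":
            if conn_type == cls.TYPE_NATIVE:
                return NativeTransport()
            elif conn_type == cls.TYPE_REST:
                return RestTransport()
            raise RuntimeError("Unexpected connection type: {}".format(conn_type))

        def describe(self) -> str:
            raise RuntimeError("Should be overridden by child classes")


    class NativeTransport(Transport):
        def describe(self) -> str:
            return "talks to the server through the C client library"


    class RestTransport(Transport):
        def describe(self) -> str:
            return "talks to the server through the HTTP/REST endpoint"


    if __name__ == "__main__":
        for t in (Transport.TYPE_NATIVE, Transport.TYPE_REST):
            print(t, "->", Transport.create(t).describe())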
+ + self.execute('drop database if exists db') + logger.debug("Resetting DB, dropped database") + # self._cursor.execute('create database db') + # self._cursor.execute('use db') + # tdSql.execute('show databases') + + def queryScalar(self, sql) -> int: + return self._queryAny(sql) + + def queryString(self, sql) -> str: + return self._queryAny(sql) + + def _queryAny(self, sql): # actual query result as an int + if (not self.isOpen): + raise RuntimeError( + "Cannot query database until connection is open") + nRows = self.query(sql) + if nRows != 1: + raise RuntimeError( + "Unexpected result for query: {}, rows = {}".format( + sql, nRows)) + if self.getResultRows() != 1 or self.getResultCols() != 1: + raise RuntimeError( + "Unexpected result set for query: {}".format(sql)) + return self.getQueryResult()[0][0] + + def execute(self, sql): + raise RuntimeError("Unexpected execution, should be overriden") + + def openByType(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getQueryResult(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getResultRows(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getResultCols(self): + raise RuntimeError("Unexpected execution, should be overriden") + +# Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql + + +class DbConnRest(DbConn): + def __init__(self): + super().__init__() + self._type = self.TYPE_REST + self._url = "http://localhost:6020/rest/sql" # fixed for now + self._result = None + + def openByType(self): # Open connection + pass # do nothing, always open + + def close(self): + if (not self.isOpen): + raise RuntimeError( + "Cannot clean up database until connection is open") + # Do nothing for REST + logger.debug("[DB] REST Database connection closed") + self.isOpen = False + + def _doSql(self, sql): + r = requests.post(self._url, + data=sql, + auth=HTTPBasicAuth('root', 'taosdata')) + rj = r.json() + # Sanity check for the "Json Result" + if ('status' not in rj): + raise RuntimeError("No status in REST response") + + if rj['status'] == 'error': # clearly reported error + if ('code' not in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in the future + # print("Raising programming error with REST return: {}".format(rj)) + raise taos.error.ProgrammingError( + rj['desc'], errno) # todo: check existance of 'desc' + + if rj['status'] != 'succ': # better be this + raise RuntimeError( + "Unexpected REST return status: {}".format( + rj['status'])) + + nRows = rj['rows'] if ('rows' in rj) else 0 + self._result = rj + return nRows + + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError( + "Cannot execute database commands until connection is open") + logger.debug("[SQL-REST] Executing SQL: {}".format(sql)) + nRows = self._doSql(sql) + logger.debug( + "[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + return nRows + + def query(self, sql): # return rows affected + return self.execute(sql) + + def getQueryResult(self): + return self._result['data'] + + def getResultRows(self): + print(self._result) + raise RuntimeError("TBD") + # return self._tdSql.queryRows + + def getResultCols(self): + print(self._result) + raise RuntimeError("TBD") + + # Duplicate code from TDMySQL, TODO: merge all this into DbConnNative + + +class MyTDSql: + def __init__(self): + self.queryRows = 0 + self.queryCols = 0 + self.affectedRows = 0 + + def 
init(self, cursor, log=True): + self.cursor = cursor + # if (log): + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # self.cursor.log(caller.filename + ".sql") + + def close(self): + self.cursor.close() + + def query(self, sql): + self.sql = sql + try: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + except Exception as e: + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # args = (caller.filename, caller.lineno, sql, repr(e)) + # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + raise + return self.queryRows + + def execute(self, sql): + self.sql = sql + try: + self.affectedRows = self.cursor.execute(sql) + except Exception as e: + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # args = (caller.filename, caller.lineno, sql, repr(e)) + # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + raise + return self.affectedRows + + +class DbConnNative(DbConn): + def __init__(self): + super().__init__() + self._type = self.TYPE_REST + self._conn = None + self._cursor = None + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("communit")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def openByType(self): # Open connection + cfgPath = self.getBuildPath() + "/test/cfg" + self._conn = taos.connect( + host="127.0.0.1", + config=cfgPath) # TODO: make configurable self._cursor = self._conn.cursor() # Get the connection/cursor ready self._cursor.execute('reset query cache') - # self._cursor.execute('use db') # note we do this in _findCurrenState + # self._cursor.execute('use db') # do this at the beginning of every + # step # Open connection - self._tdSql = TDSql() + self._tdSql = MyTDSql() self._tdSql.init(self._cursor) - self.isOpen = True - - def resetDb(self): # reset the whole database, etc. - if ( not self.isOpen ): - raise RuntimeError("Cannot reset database until connection is open") - # self._tdSql.prepare() # Recreate database, etc. 
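MyTDSql and DbConnNative above wrap the cursor interface of the TDengine native Python connector (taos.connect, cursor.execute, fetchall, description). For orientation, a bare-bones standalone use of that same interface might look like the sketch below; the host, config path, and SQL statements are placeholders, and a locally reachable taosd instance is assumed:

    import taos  # TDengine native Python connector, as used by crash_gen.py

    # Placeholder values; crash_gen.py derives the cfg path from the build tree instead.
    conn = taos.connect(host="127.0.0.1", config="../../build/test/cfg")
    cursor = conn.cursor()

    cursor.execute("reset query cache")   # same warm-up statement DbConnNative issues
    cursor.execute("show databases")      # any query; rows come back via fetchall()
    rows = cursor.fetchall()
    print("databases:", len(rows), "columns:", len(cursor.description))

    cursor.close()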
- - self._cursor.execute('drop database if exists db') - logger.debug("Resetting DB, dropped database") - # self._cursor.execute('create database db') - # self._cursor.execute('use db') - - # tdSql.execute('show databases') def close(self): - if ( not self.isOpen ): - raise RuntimeError("Cannot clean up database until connection is open") + if (not self.isOpen): + raise RuntimeError( + "Cannot clean up database until connection is open") self._tdSql.close() + logger.debug("[DB] Database connection closed") self.isOpen = False - def execute(self, sql): - if ( not self.isOpen ): - raise RuntimeError("Cannot execute database commands until connection is open") + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError( + "Cannot execute database commands until connection is open") logger.debug("[SQL] Executing SQL: {}".format(sql)) nRows = self._tdSql.execute(sql) - logger.debug("[SQL] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + logger.debug( + "[SQL] Execution Result, nRows = {}, SQL = {}".format( + nRows, sql)) return nRows - def query(self, sql) : # return rows affected - if ( not self.isOpen ): - raise RuntimeError("Cannot query database until connection is open") + def query(self, sql): # return rows affected + if (not self.isOpen): + raise RuntimeError( + "Cannot query database until connection is open") logger.debug("[SQL] Executing SQL: {}".format(sql)) nRows = self._tdSql.query(sql) - logger.debug("[SQL] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + logger.debug( + "[SQL] Query Result, nRows = {}, SQL = {}".format( + nRows, sql)) return nRows # results are in: return self._tdSql.queryResult def getQueryResult(self): return self._tdSql.queryResult - def _queryAny(self, sql) : # actual query result as an int - if ( not self.isOpen ): - raise RuntimeError("Cannot query database until connection is open") - tSql = self._tdSql - nRows = tSql.query(sql) - if nRows != 1 : - raise RuntimeError("Unexpected result for query: {}, rows = {}".format(sql, nRows)) - if tSql.queryRows != 1 or tSql.queryCols != 1: - raise RuntimeError("Unexpected result set for query: {}".format(sql)) - return tSql.queryResult[0][0] + def getResultRows(self): + return self._tdSql.queryRows + + def getResultCols(self): + return self._tdSql.queryCols - def queryScalar(self, sql) -> int : - return self._queryAny(sql) - def queryString(self, sql) -> str : - return self._queryAny(sql) - class AnyState: - STATE_INVALID = -1 - STATE_EMPTY = 0 # nothing there, no even a DB - STATE_DB_ONLY = 1 # we have a DB, but nothing else + STATE_INVALID = -1 + STATE_EMPTY = 0 # nothing there, no even a DB + STATE_DB_ONLY = 1 # we have a DB, but nothing else STATE_TABLE_ONLY = 2 # we have a table, but totally empty - STATE_HAS_DATA = 3 # we have some data in the table + STATE_HAS_DATA = 3 # we have some data in the table _stateNames = ["Invalid", "Empty", "DB_Only", "Table_Only", "Has_Data"] STATE_VAL_IDX = 0 @@ -494,7 +813,8 @@ class AnyState: self._info = self.getInfo() def __str__(self): - return self._stateNames[self._info[self.STATE_VAL_IDX] + 1] # -1 hack to accomodate the STATE_INVALID case + # -1 hack to accomodate the STATE_INVALID case + return self._stateNames[self._info[self.STATE_VAL_IDX] + 1] def getInfo(self): raise RuntimeError("Must be overriden by child classes") @@ -505,7 +825,9 @@ class AnyState: elif isinstance(other, AnyState): return self.getValIndex() == other.getValIndex() else: - raise RuntimeError("Unexpected comparison, type = {}".format(type(other))) + raise 
RuntimeError( + "Unexpected comparison, type = {}".format( + type(other))) def verifyTasksToState(self, tasks, newState): raise RuntimeError("Must be overriden by child classes") @@ -515,55 +837,65 @@ class AnyState: def getValue(self): return self._info[self.STATE_VAL_IDX] + def canCreateDb(self): return self._info[self.CAN_CREATE_DB] + def canDropDb(self): return self._info[self.CAN_DROP_DB] + def canCreateFixedSuperTable(self): return self._info[self.CAN_CREATE_FIXED_SUPER_TABLE] + def canDropFixedSuperTable(self): return self._info[self.CAN_DROP_FIXED_SUPER_TABLE] + def canAddData(self): return self._info[self.CAN_ADD_DATA] + def canReadData(self): return self._info[self.CAN_READ_DATA] def assertAtMostOneSuccess(self, tasks, cls): sCnt = 0 - for task in tasks : + for task in tasks: if not isinstance(task, cls): continue if task.isSuccess(): # task.logDebug("Task success found") sCnt += 1 - if ( sCnt >= 2 ): - raise RuntimeError("Unexpected more than 1 success with task: {}".format(cls)) + if (sCnt >= 2): + raise RuntimeError( + "Unexpected more than 1 success with task: {}".format(cls)) def assertIfExistThenSuccess(self, tasks, cls): sCnt = 0 exists = False - for task in tasks : + for task in tasks: if not isinstance(task, cls): continue - exists = True # we have a valid instance + exists = True # we have a valid instance if task.isSuccess(): sCnt += 1 - if ( exists and sCnt <= 0 ): - raise RuntimeError("Unexpected zero success for task: {}".format(cls)) + if (exists and sCnt <= 0): + raise RuntimeError( + "Unexpected zero success for task: {}".format(cls)) def assertNoTask(self, tasks, cls): - for task in tasks : + for task in tasks: if isinstance(task, cls): - raise CrashGenError("This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) + raise CrashGenError( + "This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) def assertNoSuccess(self, tasks, cls): - for task in tasks : + for task in tasks: if isinstance(task, cls): if task.isSuccess(): - raise RuntimeError("Unexpected successful task: {}".format(cls)) + raise RuntimeError( + "Unexpected successful task: {}".format(cls)) def hasSuccess(self, tasks, cls): - for task in tasks : + for task in tasks: if not isinstance(task, cls): continue if task.isSuccess(): @@ -571,35 +903,40 @@ class AnyState: return False def hasTask(self, tasks, cls): - for task in tasks : + for task in tasks: if isinstance(task, cls): return True return False + class StateInvalid(AnyState): def getInfo(self): return [ self.STATE_INVALID, - False, False, # can create/drop Db - False, False, # can create/drop fixed table - False, False, # can insert/read data with fixed table + False, False, # can create/drop Db + False, False, # can create/drop fixed table + False, False, # can insert/read data with fixed table ] # def verifyTasksToState(self, tasks, newState): + class StateEmpty(AnyState): def getInfo(self): return [ self.STATE_EMPTY, - True, False, # can create/drop Db - False, False, # can create/drop fixed table - False, False, # can insert/read data with fixed table + True, False, # can create/drop Db + False, False, # can create/drop fixed table + False, False, # can insert/read data with fixed table ] - def verifyTasksToState(self, tasks, newState): - if ( self.hasSuccess(tasks, TaskCreateDb) ): # at EMPTY, if there's succes in creating DB - if ( not self.hasTask(tasks, TaskDropDb) ) : # and no drop_db tasks - self.assertAtMostOneSuccess(tasks, 
TaskCreateDb) # we must have at most one. TODO: compare numbers + def verifyTasksToState(self, tasks, newState): + if (self.hasSuccess(tasks, TaskCreateDb) + ): # at EMPTY, if there's succes in creating DB + if (not self.hasTask(tasks, TaskDropDb)): # and no drop_db tasks + # we must have at most one. TODO: compare numbers + self.assertAtMostOneSuccess(tasks, TaskCreateDb) + class StateDbOnly(AnyState): def getInfo(self): @@ -611,32 +948,34 @@ class StateDbOnly(AnyState): ] def verifyTasksToState(self, tasks, newState): - if ( not self.hasTask(tasks, TaskCreateDb) ): - self.assertAtMostOneSuccess(tasks, TaskDropDb) # only if we don't create any more + if (not self.hasTask(tasks, TaskCreateDb)): + # only if we don't create any more + self.assertAtMostOneSuccess(tasks, TaskDropDb) self.assertIfExistThenSuccess(tasks, TaskDropDb) # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not true in massively parrallel cases # Nothing to be said about adding data task # if ( self.hasSuccess(tasks, DropDbTask) ): # dropped the DB - # self.assertHasTask(tasks, DropDbTask) # implied by hasSuccess - # self.assertAtMostOneSuccess(tasks, DropDbTask) - # self._state = self.STATE_EMPTY - if ( self.hasSuccess(tasks, TaskCreateSuperTable) ): # did not drop db, create table success - # self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table - if ( not self.hasTask(tasks, TaskDropSuperTable) ): - self.assertAtMostOneSuccess(tasks, TaskCreateSuperTable) # at most 1 attempt is successful, if we don't drop anything - # self.assertNoTask(tasks, DropDbTask) # should have have tried - # if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet - # # can't say there's add-data attempts, since they may all fail - # self._state = self.STATE_TABLE_ONLY - # else: - # self._state = self.STATE_HAS_DATA + # self.assertHasTask(tasks, DropDbTask) # implied by hasSuccess + # self.assertAtMostOneSuccess(tasks, DropDbTask) + # self._state = self.STATE_EMPTY + # if ( self.hasSuccess(tasks, TaskCreateSuperTable) ): # did not drop db, create table success + # # self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table + # if ( not self.hasTask(tasks, TaskDropSuperTable) ): + # self.assertAtMostOneSuccess(tasks, TaskCreateSuperTable) # at most 1 attempt is successful, if we don't drop anything + # self.assertNoTask(tasks, DropDbTask) # should have have tried + # if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet + # # can't say there's add-data attempts, since they may all fail + # self._state = self.STATE_TABLE_ONLY + # else: + # self._state = self.STATE_HAS_DATA # What about AddFixedData? # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # self._state = self.STATE_HAS_DATA # else: # no success in dropping db tasks, no success in create fixed table? 
read data should also fail - # # raise RuntimeError("Unexpected no-success scenario") # We might just landed all failure tasks, + # # raise RuntimeError("Unexpected no-success scenario") # We might just landed all failure tasks, # self._state = self.STATE_DB_ONLY # no change + class StateSuperTableOnly(AnyState): def getInfo(self): return [ @@ -647,8 +986,12 @@ class StateSuperTableOnly(AnyState): ] def verifyTasksToState(self, tasks, newState): - if ( self.hasSuccess(tasks, TaskDropSuperTable) ): # we are able to drop the table - self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) + if (self.hasSuccess(tasks, TaskDropSuperTable) + ): # we are able to drop the table + #self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) + # we must have had recreted it + self.hasSuccess(tasks, TaskCreateSuperTable) + # self._state = self.STATE_DB_ONLY # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table, but added data # self.assertNoTask(tasks, DropFixedTableTask) # not true in massively parrallel cases @@ -661,6 +1004,7 @@ class StateSuperTableOnly(AnyState): # raise RuntimeError("Unexpected no-success scenarios") # TODO: need to revamp!! + class StateHasData(AnyState): def getInfo(self): return [ @@ -671,165 +1015,221 @@ class StateHasData(AnyState): ] def verifyTasksToState(self, tasks, newState): - if ( newState.equals(AnyState.STATE_EMPTY) ): + if (newState.equals(AnyState.STATE_EMPTY)): self.hasSuccess(tasks, TaskDropDb) - if ( not self.hasTask(tasks, TaskCreateDb) ) : - self.assertAtMostOneSuccess(tasks, TaskDropDb) # TODO: dicy - elif ( newState.equals(AnyState.STATE_DB_ONLY) ): # in DB only - if ( not self.hasTask(tasks, TaskCreateDb)): # without a create_db task - self.assertNoTask(tasks, TaskDropDb) # we must have drop_db task + if (not self.hasTask(tasks, TaskCreateDb)): + self.assertAtMostOneSuccess(tasks, TaskDropDb) # TODO: dicy + elif (newState.equals(AnyState.STATE_DB_ONLY)): # in DB only + if (not self.hasTask(tasks, TaskCreateDb) + ): # without a create_db task + # we must have drop_db task + self.assertNoTask(tasks, TaskDropDb) self.hasSuccess(tasks, TaskDropSuperTable) # self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy - elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted - self.assertNoTask(tasks, TaskDropDb) - self.assertNoTask(tasks, TaskDropSuperTable) - self.assertNoTask(tasks, TaskAddData) + # elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted + # self.assertNoTask(tasks, TaskDropDb) + # self.assertNoTask(tasks, TaskDropSuperTable) + # self.assertNoTask(tasks, TaskAddData) # self.hasSuccess(tasks, DeleteDataTasks) - else: # should be STATE_HAS_DATA - self.assertNoTask(tasks, TaskDropDb) - if (not self.hasTask(tasks, TaskCreateSuperTable)) : # if we didn't create the table - self.assertNoTask(tasks, TaskDropSuperTable) # we should not have a task that drops it + else: # should be STATE_HAS_DATA + if (not self.hasTask(tasks, TaskCreateDb) + ): # only if we didn't create one + # we shouldn't have dropped it + self.assertNoTask(tasks, TaskDropDb) + if (not self.hasTask(tasks, TaskCreateSuperTable) + ): # if we didn't create the table + # we should not have a task that drops it + self.assertNoTask(tasks, TaskDropSuperTable) # self.assertIfExistThenSuccess(tasks, ReadFixedDataTask) -class StateMechine : + +class StateMechine: def __init__(self, dbConn): self._dbConn = dbConn - self._curState = self._findCurrentState() # starting state - self._stateWeights = [1,3,5,15] # transitition target 
probabilities, indexed with value of STATE_EMPTY, STATE_DB_ONLY, etc. - + self._curState = self._findCurrentState() # starting state + # transitition target probabilities, indexed with value of STATE_EMPTY, + # STATE_DB_ONLY, etc. + self._stateWeights = [1, 3, 5, 15] + def getCurrentState(self): return self._curState + def hasDatabase(self): + return self._curState.canDropDb() # ha, can drop DB means it has one + # May be slow, use cautionsly... - def getTaskTypes(self): # those that can run (directly/indirectly) from the current state - allTaskClasses = StateTransitionTask.__subclasses__() # all state transition tasks + def getTaskTypes(self): # those that can run (directly/indirectly) from the current state + def typesToStrings(types): + ss = [] + for t in types: + ss.append(t.__name__) + return ss + + allTaskClasses = StateTransitionTask.__subclasses__() # all state transition tasks firstTaskTypes = [] for tc in allTaskClasses: - # t = tc(self) # create task object + # t = tc(self) # create task object if tc.canBeginFrom(self._curState): firstTaskTypes.append(tc) - # now we have all the tasks that can begin directly from the current state, let's figure out the INDIRECT ones - taskTypes = firstTaskTypes.copy() # have to have these - for task1 in firstTaskTypes: # each task type gathered so far - endState = task1.getEndState() # figure the end state - if endState == None: # does not change end state - continue # no use, do nothing - for tc in allTaskClasses: # what task can further begin from there? + # now we have all the tasks that can begin directly from the current + # state, let's figure out the INDIRECT ones + taskTypes = firstTaskTypes.copy() # have to have these + for task1 in firstTaskTypes: # each task type gathered so far + endState = task1.getEndState() # figure the end state + if endState is None: # does not change end state + continue # no use, do nothing + for tc in allTaskClasses: # what task can further begin from there? if tc.canBeginFrom(endState) and (tc not in firstTaskTypes): - taskTypes.append(tc) # gather it + taskTypes.append(tc) # gather it if len(taskTypes) <= 0: - raise RuntimeError("No suitable task types found for state: {}".format(self._curState)) - logger.debug("[OPS] Tasks found for state {}: {}".format(self._curState, taskTypes)) + raise RuntimeError( + "No suitable task types found for state: {}".format( + self._curState)) + logger.debug( + "[OPS] Tasks found for state {}: {}".format( + self._curState, + typesToStrings(taskTypes))) return taskTypes def _findCurrentState(self): dbc = self._dbConn - ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state - if dbc.query("show databases") == 0 : # no database?! + ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state + if dbc.query("show databases") == 0: # no database?! 
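getTaskTypes() above is effectively a two-pass reachability computation: first it gathers the task classes that can begin from the current state, then it adds any class that can begin from an end state those tasks produce. A minimal standalone sketch of that closure, using two hypothetical task classes that are not part of the patch:

```python
class _TaskA:                        # hypothetical task: moves state "empty" -> "db"
    @classmethod
    def canBeginFrom(cls, state): return state == "empty"
    @classmethod
    def getEndState(cls): return "db"

class _TaskB:                        # hypothetical task: only legal once a DB exists
    @classmethod
    def canBeginFrom(cls, state): return state == "db"
    @classmethod
    def getEndState(cls): return None

def reachable_task_types(all_tasks, cur_state):
    # Pass 1: tasks that can start directly from the current state.
    direct = [tc for tc in all_tasks if tc.canBeginFrom(cur_state)]
    reachable = direct.copy()
    # Pass 2: tasks that can start from an end state produced by a direct task.
    for tc1 in direct:
        end_state = tc1.getEndState()
        if end_state is None:        # task does not change the state
            continue
        for tc2 in all_tasks:
            if tc2.canBeginFrom(end_state) and tc2 not in reachable:
                reachable.append(tc2)
    return reachable

print([t.__name__ for t in reachable_task_types([_TaskA, _TaskB], "empty")])
# -> ['_TaskA', '_TaskB']
```

The second pass is what lets the scheduler also pick tasks that only become legal after another task has moved the database into a new state.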
# logger.debug("Found EMPTY state") - logger.debug("[STT] empty database found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] empty database found, between {} and {}".format( + ts, time.time())) return StateEmpty() - dbc.execute("use db") # did not do this when openning connection - if dbc.query("show tables") == 0 : # no tables + # did not do this when openning connection, and this is NOT the worker + # thread, which does this on their own + dbc.execute("use db") + if dbc.query("show tables") == 0: # no tables # logger.debug("Found DB ONLY state") - logger.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] DB_ONLY found, between {} and {}".format( + ts, time.time())) return StateDbOnly() - if dbc.query("SELECT * FROM db.{}".format(DbManager.getFixedSuperTableName()) ) == 0 : # no regular tables + if dbc.query("SELECT * FROM db.{}".format(DbManager.getFixedSuperTableName()) + ) == 0: # no regular tables # logger.debug("Found TABLE_ONLY state") - logger.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] SUPER_TABLE_ONLY found, between {} and {}".format( + ts, time.time())) return StateSuperTableOnly() - else: # has actual tables + else: # has actual tables # logger.debug("Found HAS_DATA state") - logger.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] HAS_DATA found, between {} and {}".format( + ts, time.time())) return StateHasData() def transition(self, tasks): - if ( len(tasks) == 0 ): # before 1st step, or otherwise empty - return # do nothing + if (len(tasks) == 0): # before 1st step, or otherwise empty + logger.debug("[STT] Starting State: {}".format(self._curState)) + return # do nothing - self._dbConn.execute("show dnodes") # this should show up in the server log, separating steps + # this should show up in the server log, separating steps + self._dbConn.execute("show dnodes") # Generic Checks, first based on the start state if self._curState.canCreateDb(): self._curState.assertIfExistThenSuccess(tasks, TaskCreateDb) - # self.assertAtMostOneSuccess(tasks, CreateDbTask) # not really, in case of multiple creation and drops + # self.assertAtMostOneSuccess(tasks, CreateDbTask) # not really, in + # case of multiple creation and drops if self._curState.canDropDb(): self._curState.assertIfExistThenSuccess(tasks, TaskDropDb) - # self.assertAtMostOneSuccess(tasks, DropDbTask) # not really in case of drop-create-drop + # self.assertAtMostOneSuccess(tasks, DropDbTask) # not really in + # case of drop-create-drop # if self._state.canCreateFixedTable(): # self.assertIfExistThenSuccess(tasks, CreateFixedTableTask) # Not true, DB may be dropped - # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not really, in case of create-drop-create + # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not + # really, in case of create-drop-create # if self._state.canDropFixedTable(): # self.assertIfExistThenSuccess(tasks, DropFixedTableTask) # Not True, the whole DB may be dropped - # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not really in case of drop-create-drop + # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not + # really in case of drop-create-drop # if self._state.canAddData(): - # self.assertIfExistThenSuccess(tasks, AddFixedDataTask) # not true actually + # self.assertIfExistThenSuccess(tasks, AddFixedDataTask) # not true + # actually # if self._state.canReadData(): # Nothing for sure 
newState = self._findCurrentState() logger.debug("[STT] New DB state determined: {}".format(newState)) - self._curState.verifyTasksToState(tasks, newState) # can old state move to new state through the tasks? + # can old state move to new state through the tasks? + self._curState.verifyTasksToState(tasks, newState) self._curState = newState def pickTaskType(self): - taskTypes = self.getTaskTypes() # all the task types we can choose from at curent state + # all the task types we can choose from at curent state + taskTypes = self.getTaskTypes() weights = [] for tt in taskTypes: endState = tt.getEndState() - if endState != None : - weights.append(self._stateWeights[endState.getValIndex()]) # TODO: change to a method + if endState is not None: + # TODO: change to a method + weights.append(self._stateWeights[endState.getValIndex()]) else: - weights.append(10) # read data task, default to 10: TODO: change to a constant + # read data task, default to 10: TODO: change to a constant + weights.append(10) i = self._weighted_choice_sub(weights) - # logger.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) + # logger.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) return taskTypes[i] - def _weighted_choice_sub(self, weights): # ref: https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ - rnd = random.random() * sum(weights) # TODO: use our dice to ensure it being determinstic? + # ref: + # https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ + def _weighted_choice_sub(self, weights): + # TODO: use our dice to ensure it being determinstic? + rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: return i # Manager of the Database Data/Connection -class DbManager(): - def __init__(self, resetDb = True): + + +class DbManager(): + def __init__(self, resetDb=True): self.tableNumQueue = LinearQueue() - self._lastTick = self.setupLastTick() # datetime.datetime(2019, 1, 1) # initial date time tick - self._lastInt = 0 # next one is initial integer + # datetime.datetime(2019, 1, 1) # initial date time tick + self._lastTick = self.setupLastTick() + self._lastInt = 0 # next one is initial integer self._lock = threading.RLock() - + # self.openDbServerConnection() - self._dbConn = DbConn() + self._dbConn = DbConn.createNative() if ( + gConfig.connector_type == 'native') else DbConn.createRest() try: - self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected + self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected except taos.error.ProgrammingError as err: # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) - if ( err.msg == 'client disconnected' ): # cannot open DB connection - print("Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") - sys.exit() + if (err.msg == 'client disconnected'): # cannot open DB connection + print( + "Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") + sys.exit(2) else: - raise - except: + raise + except BaseException: print("[=] Unexpected exception") - raise + raise - if resetDb : - self._dbConn.resetDb() # drop and recreate DB + if resetDb: + self._dbConn.resetDb() # drop and recreate DB + + # Do this after dbConn is in proper shape + self._stateMachine = StateMechine(self._dbConn) - self._stateMachine = StateMechine(self._dbConn) # Do this after dbConn is in proper shape - def getDbConn(self): return self._dbConn - 
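pickTaskType() above weights each candidate task by the desirability of its end state (the _stateWeights list) and then draws one with _weighted_choice_sub(), the cumulative-weight trick from the linked article. A self-contained sketch of that selection:

```python
import random

def weighted_choice_sub(weights):
    # Return index i with probability weights[i] / sum(weights).
    rnd = random.random() * sum(weights)
    for i, w in enumerate(weights):
        rnd -= w
        if rnd < 0:
            return i

random.seed(0)                       # deterministic, as the harness prefers
counts = [0, 0, 0, 0]
for _ in range(10000):
    counts[weighted_choice_sub([1, 3, 5, 15])] += 1
print(counts)                        # roughly proportional to 1:3:5:15
```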
def getStateMachine(self): + def getStateMachine(self) -> StateMechine: return self._stateMachine # def getState(self): @@ -844,15 +1244,18 @@ class DbManager(): def setupLastTick(self): t1 = datetime.datetime(2020, 6, 1) t2 = datetime.datetime.now() - elSec = int(t2.timestamp() - t1.timestamp()) # maybe a very large number, takes 69 years to exceed Python int range - elSec2 = ( elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500 ) ) * 500 # a number representing seconds within 10 years + # maybe a very large number, takes 69 years to exceed Python int range + elSec = int(t2.timestamp() - t1.timestamp()) + elSec2 = (elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500)) * \ + 500 # a number representing seconds within 10 years # print("elSec = {}".format(elSec)) - t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years - t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above + t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years + t4 = datetime.datetime.fromtimestamp( + t3.timestamp() + elSec2) # see explanation above logger.info("Setting up TICKS to start from: {}".format(t4)) return t4 - def pickAndAllocateTable(self): # pick any table, and "use" it + def pickAndAllocateTable(self): # pick any table, and "use" it return self.tableNumQueue.pickAndAllocate() def addTable(self): @@ -864,13 +1267,17 @@ class DbManager(): def getFixedSuperTableName(cls): return "fs_table" - def releaseTable(self, i): # return the table back, so others can use it + def releaseTable(self, i): # return the table back, so others can use it self.tableNumQueue.release(i) def getNextTick(self): - with self._lock: # prevent duplicate tick - self._lastTick += datetime.timedelta(0, 1) # add one second to it - return self._lastTick + with self._lock: # prevent duplicate tick + if Dice.throw(10) == 0: # 1 in 10 chance + return self._lastTick + datetime.timedelta(0, -100) + else: # regular + # add one second to it + self._lastTick += datetime.timedelta(0, 1) + return self._lastTick def getNextInt(self): with self._lock: @@ -878,139 +1285,267 @@ class DbManager(): return self._lastInt def getNextBinary(self): - return "Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_{}".format(self.getNextInt()) + return "Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_{}".format( + self.getNextInt()) def getNextFloat(self): return 0.9 + self.getNextInt() - + def getTableNameToDelete(self): - tblNum = self.tableNumQueue.pop() # TODO: race condition! - if ( not tblNum ): # maybe false + tblNum = self.tableNumQueue.pop() # TODO: race condition! 
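getNextTick() above normally hands out strictly increasing one-second timestamps under a lock, but with a 1-in-10 chance deliberately returns a tick 100 seconds in the past to exercise out-of-order inserts. A minimal sketch of the same idea, detached from DbManager:

```python
import datetime
import random
import threading

class TickSource:
    """Sketch of getNextTick(): one-second increments under a lock,
    with an occasional deliberately out-of-order timestamp."""
    def __init__(self, start):
        self._last = start
        self._lock = threading.RLock()

    def next_tick(self):
        with self._lock:                      # prevent duplicate ticks across threads
            if random.randrange(10) == 0:     # 1-in-10 chance: jump 100 seconds back
                return self._last + datetime.timedelta(0, -100)
            self._last += datetime.timedelta(0, 1)
            return self._last

ticks = TickSource(datetime.datetime(2020, 6, 1))
print([str(ticks.next_tick()) for _ in range(3)])
```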
+ if (not tblNum): # maybe false return False - + return "table_{}".format(tblNum) def cleanUp(self): - self._dbConn.close() + self._dbConn.close() + class TaskExecutor(): + class BoundedList: + def __init__(self, size=10): + self._size = size + self._list = [] + + def add(self, n: int): + if not self._list: # empty + self._list.append(n) + return + # now we should insert + nItems = len(self._list) + insPos = 0 + for i in range(nItems): + insPos = i + if n <= self._list[i]: # smaller than this item, time to insert + break # found the insertion point + insPos += 1 # insert to the right + + if insPos == 0: # except for the 1st item, # TODO: elimiate first item as gating item + return # do nothing + + # print("Inserting at postion {}, value: {}".format(insPos, n)) + self._list.insert(insPos, n) # insert + + newLen = len(self._list) + if newLen <= self._size: + return # do nothing + elif newLen == (self._size + 1): + del self._list[0] # remove the first item + else: + raise RuntimeError("Corrupt Bounded List") + + def __str__(self): + return repr(self._list) + + _boundedList = BoundedList() + def __init__(self, curStep): self._curStep = curStep + @classmethod + def getBoundedList(cls): + return cls._boundedList + def getCurStep(self): return self._curStep - def execute(self, task: Task, wt: WorkerThread): # execute a task on a thread + def execute(self, task: Task, wt: WorkerThread): # execute a task on a thread task.execute(wt) + def recordDataMark(self, n: int): + # print("[{}]".format(n), end="", flush=True) + self._boundedList.add(n) + # def logInfo(self, msg): # logger.info(" T[{}.x]: ".format(self._curStep) + msg) # def logDebug(self, msg): # logger.debug(" T[{}.x]: ".format(self._curStep) + msg) + class Task(): taskSn = 100 @classmethod def allocTaskNum(cls): - Task.taskSn += 1 # IMPORTANT: cannot use cls.taskSn, since each sub class will have a copy + Task.taskSn += 1 # IMPORTANT: cannot use cls.taskSn, since each sub class will have a copy # logger.debug("Allocating taskSN: {}".format(Task.taskSn)) return Task.taskSn - def __init__(self, dbManager: DbManager, execStats: ExecutionStats): + def __init__(self, dbManager: DbManager, execStats: ExecutionStats): self._dbManager = dbManager - self._workerThread = None + self._workerThread = None self._err = None + self._aborted = False self._curStep = None - self._numRows = None # Number of rows affected + self._numRows = None # Number of rows affected - # Assign an incremental task serial number + # Assign an incremental task serial number self._taskNum = self.allocTaskNum() # logger.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats + self._lastSql = "" # last SQL executed/attempted def isSuccess(self): - return self._err == None + return self._err is None - def clone(self): # TODO: why do we need this again? + def isAborted(self): + return self._aborted + + def clone(self): # TODO: why do we need this again? 
newTask = self.__class__(self._dbManager, self._execStats) return newTask def logDebug(self, msg): - self._workerThread.logDebug("Step[{}.{}] {}".format(self._curStep, self._taskNum, msg)) + self._workerThread.logDebug( + "Step[{}.{}] {}".format( + self._curStep, self._taskNum, msg)) def logInfo(self, msg): - self._workerThread.logInfo("Step[{}.{}] {}".format(self._curStep, self._taskNum, msg)) + self._workerThread.logInfo( + "Step[{}.{}] {}".format( + self._curStep, self._taskNum, msg)) def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - raise RuntimeError("To be implemeted by child classes, class name: {}".format(self.__class__.__name__)) + raise RuntimeError( + "To be implemeted by child classes, class name: {}".format( + self.__class__.__name__)) def execute(self, wt: WorkerThread): wt.verifyThreadSelf() - self._workerThread = wt # type: ignore + self._workerThread = wt # type: ignore te = wt.getTaskExecutor() self._curStep = te.getCurStep() - self.logDebug("[-] executing task {}...".format(self.__class__.__name__)) + self.logDebug( + "[-] executing task {}...".format(self.__class__.__name__)) self._err = None - self._execStats.beginTaskType(self.__class__.__name__) # mark beginning + self._execStats.beginTaskType( + self.__class__.__name__) # mark beginning try: - self._executeInternal(te, wt) # TODO: no return value? + self._executeInternal(te, wt) # TODO: no return value? except taos.error.ProgrammingError as err: - self.logDebug("[=] Taos library exception: errno={:X}, msg: {}".format(err.errno, err)) - self._err = err - except: - self.logDebug("[=] Unexpected exception") + errno2 = err.errno if ( + err.errno > 0) else 0x80000000 + err.errno # correct error scheme + if (gConfig.continue_on_exception): # user choose to continue + self.logDebug( + "[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format( + errno2, err, self._lastSql)) + self._err = err + elif (errno2 in [ + 0x05, # TSDB_CODE_RPC_NOT_READY + 0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, + 0x381, 0x380, 0x383, + 0x386, # DB is being dropped?! + 0x503, + 0x510, # vnode not in ready state + 0x600, + 1000 # REST catch-all error + ]): # allowed errors + self.logDebug( + "[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( + errno2, err, self._lastSql)) + print("_", end="", flush=True) + self._err = err + else: + errMsg = "[=] Unexpected Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( + errno2, err, self._lastSql) + self.logDebug(errMsg) + if gConfig.debug: + # raise # so that we see full stack + traceback.print_exc() + print( + "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + + "----------------------------\n") + # sys.exit(-1) + self._err = err + self._aborted = True + except Exception as e: + self.logInfo("Non-TAOS exception encountered") + self._err = e + self._aborted = True + traceback.print_exc() + except BaseException as e: + self.logInfo("Python base exception encountered") + self._err = e + self._aborted = True + traceback.print_exc() + except BaseException: + self.logDebug( + "[=] Unexpected exception, SQL: {}".format( + self._lastSql)) raise self._execStats.endTaskType(self.__class__.__name__, self.isSuccess()) - - self.logDebug("[X] task execution completed, {}, status: {}".format(self.__class__.__name__, "Success" if self.isSuccess() else "Failure")) - self._execStats.incExecCount(self.__class__.__name__, self.isSuccess()) # TODO: merge with above. 
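The exception handling above first normalizes the connector's error number (non-positive values are shifted by 0x80000000) and then checks it against a list of errors the test tolerates. The same logic as a small standalone sketch; the whitelist values are copied from the patch, with the same code comments:

```python
# Errors the crash generator treats as acceptable (0x386: DB being dropped,
# 0x510: vnode not in ready state, 1000: REST catch-all, etc.).
ACCEPTABLE_ERRNOS = {0x05, 0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D,
                     0x380, 0x381, 0x383, 0x386, 0x503, 0x510, 0x600, 1000}

def normalize_errno(errno):
    # Mirrors errno2 above: correct for the connector's error-number scheme.
    return errno if errno > 0 else 0x80000000 + errno

def is_acceptable(errno):
    return normalize_errno(errno) in ACCEPTABLE_ERRNOS
```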
+ + self.logDebug("[X] task execution completed, {}, status: {}".format( + self.__class__.__name__, "Success" if self.isSuccess() else "Failure")) + # TODO: merge with above. + self._execStats.incExecCount(self.__class__.__name__, self.isSuccess()) def execSql(self, sql): + self._lastSql = sql return self._dbManager.execute(sql) - + def execWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread + self._lastSql = sql + return wt.execSql(sql) + + def queryWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread + self._lastSql = sql + return wt.querySql(sql) + + def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread + return wt.getQueryResult() + + class ExecutionStats: def __init__(self): - self._execTimes: Dict[str, [int, int]] = {} # total/success times for a task + # total/success times for a task + self._execTimes: Dict[str, [int, int]] = {} self._tasksInProgress = 0 self._lock = threading.Lock() self._firstTaskStartTime = None self._execStartTime = None - self._elapsedTime = 0.0 # total elapsed time - self._accRunTime = 0.0 # accumulated run time + self._elapsedTime = 0.0 # total elapsed time + self._accRunTime = 0.0 # accumulated run time self._failed = False self._failureReason = None + def __str__(self): + return "[ExecStats: _failed={}, _failureReason={}".format( + self._failed, self._failureReason) + + def isFailed(self): + return self._failed + def startExec(self): self._execStartTime = time.time() def endExec(self): self._elapsedTime = time.time() - self._execStartTime - def incExecCount(self, klassName, isSuccess): # TODO: add a lock here + def incExecCount(self, klassName, isSuccess): # TODO: add a lock here if klassName not in self._execTimes: self._execTimes[klassName] = [0, 0] - t = self._execTimes[klassName] # tuple for the data - t[0] += 1 # index 0 has the "total" execution times + t = self._execTimes[klassName] # tuple for the data + t[0] += 1 # index 0 has the "total" execution times if isSuccess: - t[1] += 1 # index 1 has the "success" execution times + t[1] += 1 # index 1 has the "success" execution times def beginTaskType(self, klassName): with self._lock: - if self._tasksInProgress == 0 : # starting a new round - self._firstTaskStartTime = time.time() # I am now the first task + if self._tasksInProgress == 0: # starting a new round + self._firstTaskStartTime = time.time() # I am now the first task self._tasksInProgress += 1 def endTaskType(self, klassName, isSuccess): with self._lock: self._tasksInProgress -= 1 - if self._tasksInProgress == 0 : # all tasks have stopped + if self._tasksInProgress == 0: # all tasks have stopped self._accRunTime += (time.time() - self._firstTaskStartTime) self._firstTaskStartTime = None @@ -1018,33 +1553,52 @@ class ExecutionStats: self._failed = True self._failureReason = reason - def logStats(self): - logger.info("----------------------------------------------------------------------") - logger.info("| Crash_Gen test {}, with the following stats:". - format("FAILED (reason: {})".format(self._failureReason) if self._failed else "SUCCEEDED")) + def printStats(self): + logger.info( + "----------------------------------------------------------------------") + logger.info( + "| Crash_Gen test {}, with the following stats:". 
format( + "FAILED (reason: {})".format( + self._failureReason) if self._failed else "SUCCEEDED")) logger.info("| Task Execution Times (success/total):") execTimesAny = 0 - for k, n in self._execTimes.items(): + for k, n in self._execTimes.items(): execTimesAny += n[0] - logger.info("| {0:<24}: {1}/{2}".format(k,n[1],n[0])) - - logger.info("| Total Tasks Executed (success or not): {} ".format(execTimesAny)) - logger.info("| Total Tasks In Progress at End: {}".format(self._tasksInProgress)) - logger.info("| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format(self._accRunTime)) - logger.info("| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime/execTimesAny)) - logger.info("| Total Elapsed Time (from wall clock): {:.3f} seconds".format(self._elapsedTime)) - logger.info("----------------------------------------------------------------------") - + logger.info("| {0:<24}: {1}/{2}".format(k, n[1], n[0])) + + logger.info( + "| Total Tasks Executed (success or not): {} ".format(execTimesAny)) + logger.info( + "| Total Tasks In Progress at End: {}".format( + self._tasksInProgress)) + logger.info( + "| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format( + self._accRunTime)) + logger.info( + "| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime / execTimesAny)) + logger.info( + "| Total Elapsed Time (from wall clock): {:.3f} seconds".format( + self._elapsedTime)) + logger.info( + "| Top numbers written: {}".format( + TaskExecutor.getBoundedList())) + logger.info( + "----------------------------------------------------------------------") class StateTransitionTask(Task): + LARGE_NUMBER_OF_TABLES = 35 + SMALL_NUMBER_OF_TABLES = 3 + LARGE_NUMBER_OF_RECORDS = 50 + SMALL_NUMBER_OF_RECORDS = 3 + @classmethod - def getInfo(cls): # each sub class should supply their own information + def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") - _endState = None + _endState = None @classmethod - def getEndState(cls): # TODO: optimize by calling it fewer times + def getEndState(cls): # TODO: optimize by calling it fewer times raise RuntimeError("Overriding method expected") # @classmethod @@ -1060,20 +1614,26 @@ class StateTransitionTask(Task): # return state.getValue() in cls.getBeginStates() raise RuntimeError("must be overriden") + @classmethod + def getRegTableName(cls, i): + return "db.reg_table_{}".format(i) + def execute(self, wt: WorkerThread): super().execute(wt) - + + class TaskCreateDb(StateTransitionTask): @classmethod def getEndState(cls): - return StateDbOnly() + return StateDbOnly() @classmethod def canBeginFrom(cls, state: AnyState): return state.canCreateDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - wt.execSql("create database db") + self.execWtSql(wt, "create database db") + class TaskDropDb(StateTransitionTask): @classmethod @@ -1085,9 +1645,10 @@ class TaskDropDb(StateTransitionTask): return state.canDropDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - wt.execSql("drop database db") + self.execWtSql(wt, "drop database db") logger.debug("[OPS] database dropped at {}".format(time.time())) + class TaskCreateSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -1098,87 +1659,135 @@ class TaskCreateSuperTable(StateTransitionTask): return state.canCreateFixedSuperTable() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - tblName = 
self._dbManager.getFixedSuperTableName() - wt.execSql("create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) - # No need to create the regular tables, INSERT will do that automatically + if not wt.dbInUse(): # no DB yet, to the best of our knowledge + logger.debug("Skipping task, no DB yet") + return + + tblName = self._dbManager.getFixedSuperTableName() + # wt.execSql("use db") # should always be in place + self.execWtSql( + wt, + "create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) + # No need to create the regular tables, INSERT will do that + # automatically class TaskReadData(StateTransitionTask): @classmethod def getEndState(cls): - return None # meaning doesn't affect state + return None # meaning doesn't affect state @classmethod def canBeginFrom(cls, state: AnyState): return state.canReadData() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - sTbName = self._dbManager.getFixedSuperTableName() - dbc = wt.getDbConn() - dbc.query("select TBNAME from db.{}".format(sTbName)) # TODO: analyze result set later - if random.randrange(5) == 0 : # 1 in 5 chance, simulate a broken connection. TODO: break connection in all situations - dbc.close() - dbc.open() + sTbName = self._dbManager.getFixedSuperTableName() + self.queryWtSql(wt, "select TBNAME from db.{}".format( + sTbName)) # TODO: analyze result set later + + if random.randrange( + 5) == 0: # 1 in 5 chance, simulate a broken connection. TODO: break connection in all situations + wt.getDbConn().close() + wt.getDbConn().open() else: - rTables = dbc.getQueryResult() + # wt.getDbConn().getQueryResult() + rTables = self.getQueryResult(wt) # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) - for rTbName in rTables : # regular tables - dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure + for rTbName in rTables: # regular tables + self.execWtSql(wt, "select * from db.{}".format(rTbName[0])) # tdSql.query(" cars where tbname in ('carzero', 'carone')") + class TaskDropSuperTable(StateTransitionTask): @classmethod def getEndState(cls): - return StateDbOnly() + return StateDbOnly() @classmethod def canBeginFrom(cls, state: AnyState): return state.canDropFixedSuperTable() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - tblName = self._dbManager.getFixedSuperTableName() - wt.execSql("drop table db.{}".format(tblName)) + # 1/2 chance, we'll drop the regular tables one by one, in a randomized + # sequence + if Dice.throw(2) == 0: + tblSeq = list(range( + 2 + (self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES))) + random.shuffle(tblSeq) + tickOutput = False # if we have spitted out a "d" character for "drop regular table" + isSuccess = True + for i in tblSeq: + regTableName = self.getRegTableName( + i) # "db.reg_table_{}".format(i) + try: + self.execWtSql(wt, "drop table {}".format( + regTableName)) # nRows always 0, like MySQL + except taos.error.ProgrammingError as err: + # correcting for strange error number scheme + errno2 = err.errno if ( + err.errno > 0) else 0x80000000 + err.errno + if (errno2 in [0x362]): # mnode invalid table name + isSuccess = False + logger.debug( + "[DB] Acceptable error when dropping a table") + continue # try to delete next regular table + + if (not tickOutput): + tickOutput = True # Print only one time + if isSuccess: + print("d", end="", flush=True) + else: + print("f", end="", flush=True) + + # Drop the super table 
itself + tblName = self._dbManager.getFixedSuperTableName() + self.execWtSql(wt, "drop table db.{}".format(tblName)) + class TaskAlterTags(StateTransitionTask): @classmethod def getEndState(cls): - return None # meaning doesn't affect state + return None # meaning doesn't affect state @classmethod def canBeginFrom(cls, state: AnyState): - return state.canDropFixedSuperTable() # if we can drop it, we can alter tags + return state.canDropFixedSuperTable() # if we can drop it, we can alter tags def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - tblName = self._dbManager.getFixedSuperTableName() + tblName = self._dbManager.getFixedSuperTableName() dice = Dice.throw(4) - if dice == 0 : - wt.execSql("alter table db.{} add tag extraTag int".format(tblName)) - elif dice == 1 : - wt.execSql("alter table db.{} drop tag extraTag".format(tblName)) - elif dice == 2 : - wt.execSql("alter table db.{} drop tag newTag".format(tblName)) - else: # dice == 3 - wt.execSql("alter table db.{} change tag extraTag newTag".format(tblName)) + if dice == 0: + sql = "alter table db.{} add tag extraTag int".format(tblName) + elif dice == 1: + sql = "alter table db.{} drop tag extraTag".format(tblName) + elif dice == 2: + sql = "alter table db.{} drop tag newTag".format(tblName) + else: # dice == 3 + sql = "alter table db.{} change tag extraTag newTag".format( + tblName) + + self.execWtSql(wt, sql) + class TaskAddData(StateTransitionTask): - activeTable : Set[int] = set() # Track which table is being actively worked on - LARGE_NUMBER_OF_TABLES = 35 - SMALL_NUMBER_OF_TABLES = 3 - LARGE_NUMBER_OF_RECORDS = 50 - SMALL_NUMBER_OF_RECORDS = 3 + # Track which table is being actively worked on + activeTable: Set[int] = set() - # We use these two files to record operations to DB, useful for power-off tests + # We use these two files to record operations to DB, useful for power-off + # tests fAddLogReady = None fAddLogDone = None @classmethod def prepToRecordOps(cls): - if gConfig.record_ops : - if ( cls.fAddLogReady == None ): - logger.info("Recording in a file operations to be performed...") + if gConfig.record_ops: + if (cls.fAddLogReady is None): + logger.info( + "Recording in a file operations to be performed...") cls.fAddLogReady = open("add_log_ready.txt", "w") - if ( cls.fAddLogDone == None ): + if (cls.fAddLogDone is None): logger.info("Recording in a file operations completed...") cls.fAddLogDone = open("add_log_done.txt", "w") @@ -1189,105 +1798,92 @@ class TaskAddData(StateTransitionTask): @classmethod def canBeginFrom(cls, state: AnyState): return state.canAddData() - + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): ds = self._dbManager - wt.execSql("use db") # TODO: seems to be an INSERT bug to require this - tblSeq = list(range(self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) - random.shuffle(tblSeq) - for i in tblSeq: - if ( i in self.activeTable ): # wow already active - # logger.info("Concurrent data insertion into table: {}".format(i)) - # print("ct({})".format(i), end="", flush=True) # Concurrent insertion into table + # wt.execSql("use db") # TODO: seems to be an INSERT bug to require + # this + tblSeq = list( + range( + self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) + random.shuffle(tblSeq) + for i in tblSeq: + if (i in self.activeTable): # wow already active + # logger.info("Concurrent data insertion into table: {}".format(i)) + # print("ct({})".format(i), end="", flush=True) # Concurrent + # 
insertion into table print("x", end="", flush=True) else: - self.activeTable.add(i) # marking it active - # No need to shuffle data sequence, unless later we decide to do non-increment insertion - for j in range(self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS) : # number of records per table + self.activeTable.add(i) # marking it active + # No need to shuffle data sequence, unless later we decide to do + # non-increment insertion + regTableName = self.getRegTableName( + i) # "db.reg_table_{}".format(i) + for j in range( + self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS): # number of records per table nextInt = ds.getNextInt() - regTableName = "db.reg_table_{}".format(i) if gConfig.record_ops: self.prepToRecordOps() - self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) + self.fAddLogReady.write( + "Ready to write {} to {}\n".format( + nextInt, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady) sql = "insert into {} using {} tags ('{}', {}) values ('{}', {});".format( - regTableName, - ds.getFixedSuperTableName(), + regTableName, + ds.getFixedSuperTableName(), ds.getNextBinary(), ds.getNextFloat(), ds.getNextTick(), nextInt) - wt.execSql(sql) + self.execWtSql(wt, sql) + # Successfully wrote the data into the DB, let's record it + # somehow + te.recordDataMark(nextInt) if gConfig.record_ops: - self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) + self.fAddLogDone.write( + "Wrote {} to {}\n".format( + nextInt, regTableName)) self.fAddLogDone.flush() os.fsync(self.fAddLogDone) - self.activeTable.discard(i) # not raising an error, unlike remove + self.activeTable.discard(i) # not raising an error, unlike remove # Deterministic random number generator class Dice(): - seeded = False # static, uninitialized + seeded = False # static, uninitialized @classmethod - def seed(cls, s): # static + def seed(cls, s): # static if (cls.seeded): - raise RuntimeError("Cannot seed the random generator more than once") + raise RuntimeError( + "Cannot seed the random generator more than once") cls.verifyRNG() random.seed(s) cls.seeded = True # TODO: protect against multi-threading @classmethod - def verifyRNG(cls): # Verify that the RNG is determinstic + def verifyRNG(cls): # Verify that the RNG is determinstic random.seed(0) x1 = random.randrange(0, 1000) x2 = random.randrange(0, 1000) x3 = random.randrange(0, 1000) - if ( x1 != 864 or x2!=394 or x3!=776 ): + if (x1 != 864 or x2 != 394 or x3 != 776): raise RuntimeError("System RNG is not deterministic") @classmethod - def throw(cls, stop): # get 0 to stop-1 + def throw(cls, stop): # get 0 to stop-1 return cls.throwRange(0, stop) @classmethod - def throwRange(cls, start, stop): # up to stop-1 - if ( not cls.seeded ): + def throwRange(cls, start, stop): # up to stop-1 + if (not cls.seeded): raise RuntimeError("Cannot throw dice before seeding it") return random.randrange(start, stop) -# Anyone needing to carry out work should simply come here -# class WorkDispatcher(): -# def __init__(self, dbState): -# # self.totalNumMethods = 2 -# self.tasks = [ -# # CreateTableTask(dbState), # Obsolete -# # DropTableTask(dbState), -# # AddDataTask(dbState), -# ] - -# def throwDice(self): -# max = len(self.tasks) - 1 -# dRes = random.randint(0, max) -# # logger.debug("Threw the dice in range [{},{}], and got: {}".format(0,max,dRes)) -# return dRes - -# def pickTask(self): -# dice = self.throwDice() -# return self.tasks[dice] - -# def 
doWork(self, workerThread): -# task = self.pickTask() -# task.execute(workerThread) - class LoggingFilter(logging.Filter): def filter(self, record: logging.LogRecord): - if ( record.levelno >= logging.INFO ) : - return True # info or above always log - - msg = record.msg - # print("type = {}, value={}".format(type(msg), msg)) - # sys.exit() + if (record.levelno >= logging.INFO): + return True # info or above always log # Commenting out below to adjust... @@ -1295,10 +1891,499 @@ class LoggingFilter(logging.Filter): # return False return True - + +class MyLoggingAdapter(logging.LoggerAdapter): + def process(self, msg, kwargs): + return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs + # return '[%s] %s' % (self.extra['connid'], msg), kwargs + + +class SvcManager: + def __init__(self): + print("Starting TDengine Service Manager") + signal.signal(signal.SIGTERM, self.sigIntHandler) + signal.signal(signal.SIGINT, self.sigIntHandler) + signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! + + self.inSigHandler = False + # self._status = MainExec.STATUS_RUNNING # set inside + # _startTaosService() + self.svcMgrThread = None + + def _doMenu(self): + choice = "" + while True: + print("\nInterrupting Service Program, Choose an Action: ") + print("1: Resume") + print("2: Terminate") + print("3: Restart") + # Remember to update the if range below + # print("Enter Choice: ", end="", flush=True) + while choice == "": + choice = input("Enter Choice: ") + if choice != "": + break # done with reading repeated input + if choice in ["1", "2", "3"]: + break # we are done with whole method + print("Invalid choice, please try again.") + choice = "" # reset + return choice + + def sigUsrHandler(self, signalNumber, frame): + print("Interrupting main thread execution upon SIGUSR1") + if self.inSigHandler: # already + print("Ignoring repeated SIG...") + return # do nothing if it's already not running + self.inSigHandler = True + + choice = self._doMenu() + if choice == "1": + # TODO: can the sub-process be blocked due to us not reading from + # queue? + self.sigHandlerResume() + elif choice == "2": + self.stopTaosService() + elif choice == "3": + self.stopTaosService() + self.startTaosService() + else: + raise RuntimeError("Invalid menu choice: {}".format(choice)) + + self.inSigHandler = False + + def sigIntHandler(self, signalNumber, frame): + print("Sig INT Handler starting...") + if self.inSigHandler: + print("Ignoring repeated SIG_INT...") + return + self.inSigHandler = True + + self.stopTaosService() + print("INT signal handler returning...") + self.inSigHandler = False + + def sigHandlerResume(self): + print("Resuming TDengine service manager thread (main thread)...\n\n") + + def _checkServiceManagerThread(self): + if self.svcMgrThread: # valid svc mgr thread + if self.svcMgrThread.isStopped(): # done? + self.svcMgrThread.procIpcBatch() # one last time. TODO: appropriate? 
+ self.svcMgrThread = None # no more + + def _procIpcAll(self): + while self.svcMgrThread: # for as long as the svc mgr thread is still here + self.svcMgrThread.procIpcBatch() # regular processing, + time.sleep(0.5) # pause, before next round + self._checkServiceManagerThread() + print( + "Service Manager Thread (with subprocess) has ended, main thread now exiting...") + + def startTaosService(self): + if self.svcMgrThread: + raise RuntimeError( + "Cannot start TAOS service when one may already be running") + self.svcMgrThread = ServiceManagerThread() # create the object + self.svcMgrThread.start() + print("TAOS service started, printing out output...") + self.svcMgrThread.procIpcBatch( + trimToTarget=10, + forceOutput=True) # for printing 10 lines + print("TAOS service started") + + def stopTaosService(self, outputLines=20): + print("Terminating Service Manager Thread (SMT) execution...") + if not self.svcMgrThread: + raise RuntimeError("Unexpected empty svc mgr thread") + self.svcMgrThread.stop() + if self.svcMgrThread.isStopped(): + self.svcMgrThread.procIpcBatch(outputLines) # one last time + self.svcMgrThread = None + print("----- End of TDengine Service Output -----\n") + print("SMT execution terminated") + else: + print("WARNING: SMT did not terminate as expected") + + def run(self): + self.startTaosService() + self._procIpcAll() # pump/process all the messages + if self.svcMgrThread: # if sig handler hasn't destroyed it by now + self.stopTaosService() # should have started already + + +class ServiceManagerThread: + MAX_QUEUE_SIZE = 10000 + + def __init__(self): + self._tdeSubProcess = None + self._thread = None + self._status = None + + def getStatus(self): + return self._status + + def isRunning(self): + # return self._thread and self._thread.is_alive() + return self._status == MainExec.STATUS_RUNNING + + def isStopping(self): + return self._status == MainExec.STATUS_STOPPING + + def isStopped(self): + return self._status == MainExec.STATUS_STOPPED + + # Start the thread (with sub process), and wait for the sub service + # to become fully operational + def start(self): + if self._thread: + raise RuntimeError("Unexpected _thread") + if self._tdeSubProcess: + raise RuntimeError("TDengine sub process already created/running") + + self._status = MainExec.STATUS_STARTING + + self._tdeSubProcess = TdeSubProcess() + self._tdeSubProcess.start() + + self._ipcQueue = Queue() + self._thread = threading.Thread( + target=self.svcOutputReader, + args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) + self._thread.daemon = True # thread dies with the program + self._thread.start() + + # wait for service to start + for i in range(0, 10): + time.sleep(1.0) + # self.procIpcBatch() # don't pump message during start up + print("_zz_", end="", flush=True) + if self._status == MainExec.STATUS_RUNNING: + logger.info("[] TDengine service READY to process requests") + return # now we've started + # TODO: handle this better? 
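ServiceManagerThread.start() above relies on the classic non-blocking-pipe pattern (see the Stack Overflow reference in svcOutputReader below): a daemon thread reads the sub-process's stdout line by line and pushes the lines onto a Queue that the main thread drains in batches. A minimal sketch of the pattern, using 'vmstat 1' as a stand-in command (it appears in the patch only as a commented-out alternative to taosd):

```python
import subprocess
import threading
from queue import Queue, Empty

def reader(pipe, q):
    # Runs on its own thread so the main loop never blocks on the pipe.
    for line in iter(pipe.readline, b''):
        q.put(line.decode("utf-8").rstrip())
    pipe.close()

proc = subprocess.Popen(['vmstat', '1'], stdout=subprocess.PIPE)
q = Queue()
threading.Thread(target=reader, args=(proc.stdout, q), daemon=True).start()

for _ in range(3):
    try:
        print(q.get(timeout=2.0))    # drain whatever has arrived, without blocking forever
    except Empty:
        pass                         # no output yet; a real loop would keep pumping
proc.terminate()
```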
+ raise RuntimeError("TDengine service did not start successfully") + + def stop(self): + # can be called from both main thread or signal handler + print("Terminating TDengine service running as the sub process...") + if self.isStopped(): + print("Service already stopped") + return + if self.isStopping(): + print("Service is already being stopped") + return + # Linux will send Control-C generated SIGINT to the TDengine process + # already, ref: + # https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes + if not self._tdeSubProcess: + raise RuntimeError("sub process object missing") + + self._status = MainExec.STATUS_STOPPING + self._tdeSubProcess.stop() + + if self._tdeSubProcess.isRunning(): # still running + print( + "FAILED to stop sub process, it is still running... pid = {}".format( + self.subProcess.pid)) + else: + self._tdeSubProcess = None # not running any more + self.join() # stop the thread, change the status, etc. + + def join(self): + # TODO: sanity check + if not self.isStopping(): + raise RuntimeError( + "Unexpected status when ending svc mgr thread: {}".format( + self._status)) + + if self._thread: + self._thread.join() + self._thread = None + self._status = MainExec.STATUS_STOPPED + else: + print("Joining empty thread, doing nothing") + + def _trimQueue(self, targetSize): + if targetSize <= 0: + return # do nothing + q = self._ipcQueue + if (q.qsize() <= targetSize): # no need to trim + return + + logger.debug("Triming IPC queue to target size: {}".format(targetSize)) + itemsToTrim = q.qsize() - targetSize + for i in range(0, itemsToTrim): + try: + q.get_nowait() + except Empty: + break # break out of for loop, no more trimming + + TD_READY_MSG = "TDengine is initialized successfully" + + def procIpcBatch(self, trimToTarget=0, forceOutput=False): + self._trimQueue(trimToTarget) # trim if necessary + # Process all the output generated by the underlying sub process, + # managed by IO thread + print("<", end="", flush=True) + while True: + try: + line = self._ipcQueue.get_nowait() # getting output at fast speed + self._printProgress("_o") + except Empty: + # time.sleep(2.3) # wait only if there's no output + # no more output + print(".>", end="", flush=True) + return # we are done with THIS BATCH + else: # got line, printing out + if forceOutput: + logger.info(line) + else: + logger.debug(line) + print(">", end="", flush=True) + + _ProgressBars = ["--", "//", "||", "\\\\"] + + def _printProgress(self, msg): # TODO: assuming 2 chars + print(msg, end="", flush=True) + pBar = self._ProgressBars[Dice.throw(4)] + print(pBar, end="", flush=True) + print('\b\b\b\b', end="", flush=True) + + def svcOutputReader(self, out: IO, queue): + # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python + # print("This is the svcOutput Reader...") + # for line in out : + for line in iter(out.readline, b''): + # print("Finished reading a line: {}".format(line)) + # print("Adding item to queue...") + line = line.decode("utf-8").rstrip() + # This might block, and then causing "out" buffer to block + queue.put(line) + self._printProgress("_i") + + if self._status == MainExec.STATUS_STARTING: # we are starting, let's see if we have started + if line.find(self.TD_READY_MSG) != -1: # found + self._status = MainExec.STATUS_RUNNING + + # Trim the queue if necessary: TODO: try this 1 out of 10 times + self._trimQueue(self.MAX_QUEUE_SIZE * 9 // 10) # trim to 90% size + + if self.isStopping(): # TODO: use 
thread status instead + # WAITING for stopping sub process to finish its outptu + print("_w", end="", flush=True) + + # queue.put(line) + # meaning sub process must have died + print("\nNo more output from IO thread managing TDengine service") + out.close() + + +class TdeSubProcess: + def __init__(self): + self.subProcess = None + + def getStdOut(self): + return self.subProcess.stdout + + def isRunning(self): + return self.subProcess is not None + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("communit")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def start(self): + ON_POSIX = 'posix' in sys.builtin_module_names + + taosdPath = self.getBuildPath() + "/build/bin/taosd" + cfgPath = self.getBuildPath() + "/test/cfg" + + svcCmd = [taosdPath, '-c', cfgPath] + # svcCmd = ['vmstat', '1'] + if self.subProcess: # already there + raise RuntimeError("Corrupt process state") + + self.subProcess = subprocess.Popen( + svcCmd, + stdout=subprocess.PIPE, + # bufsize=1, # not supported in binary mode + close_fds=ON_POSIX) # had text=True, which interferred with reading EOF + + def stop(self): + if not self.subProcess: + print("Sub process already stopped") + return + + retCode = self.subProcess.poll() + if retCode: # valid return code, process ended + self.subProcess = None + else: # process still alive, let's interrupt it + print( + "Sub process is running, sending SIG_INT and waiting for it to terminate...") + # sub process should end, then IPC queue should end, causing IO + # thread to end + self.subProcess.send_signal(signal.SIGINT) + try: + self.subProcess.wait(10) + except subprocess.TimeoutExpired as err: + print("Time out waiting for TDengine service process to exit") + else: + print("TDengine service process terminated successfully from SIG_INT") + self.subProcess = None + + +class ClientManager: + def __init__(self): + print("Starting service manager") + signal.signal(signal.SIGTERM, self.sigIntHandler) + signal.signal(signal.SIGINT, self.sigIntHandler) + + self._status = MainExec.STATUS_RUNNING + self.tc = None + + def sigIntHandler(self, signalNumber, frame): + if self._status != MainExec.STATUS_RUNNING: + print("Ignoring repeated SIGINT...") + return # do nothing if it's already not running + self._status = MainExec.STATUS_STOPPING # immediately set our status + + print("Terminating program...") + self.tc.requestToStop() + + def _printLastNumbers(self): # to verify data durability + dbManager = DbManager(resetDb=False) + dbc = dbManager.getDbConn() + if dbc.query("show databases") == 0: # no databae + return + if dbc.query("show tables") == 0: # no tables + return + + dbc.execute("use db") + sTbName = dbManager.getFixedSuperTableName() + + # get all regular tables + # TODO: analyze result set later + dbc.query("select TBNAME from db.{}".format(sTbName)) + rTables = dbc.getQueryResult() + + bList = TaskExecutor.BoundedList() + for rTbName in rTables: # regular tables + dbc.query("select speed from db.{}".format(rTbName[0])) + numbers = dbc.getQueryResult() + for row in numbers: + # print("<{}>".format(n), end="", flush=True) + bList.add(row[0]) + + print("Top numbers in DB right now: {}".format(bList)) + print("TDengine client 
execution is about to start in 2 seconds...") + time.sleep(2.0) + dbManager = None # release? + + def prepare(self): + self._printLastNumbers() + + def run(self): + if gConfig.auto_start_service: + svcMgr = SvcManager() + svcMgr.startTaosService() + + self._printLastNumbers() + + dbManager = DbManager() # Regular function + thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps) + self.tc = ThreadCoordinator(thPool, dbManager) + + self.tc.run() + # print("exec stats: {}".format(self.tc.getExecStats())) + # print("TC failed = {}".format(self.tc.isFailed())) + if gConfig.auto_start_service: + svcMgr.stopTaosService() + # Print exec status, etc., AFTER showing messages from the server + self.conclude() + # print("TC failed (2) = {}".format(self.tc.isFailed())) + # Linux return code: ref https://shapeshed.com/unix-exit-codes/ + return 1 if self.tc.isFailed() else 0 + + def conclude(self): + self.tc.printStats() + self.tc.getDbManager().cleanUp() + + +class MainExec: + STATUS_STARTING = 1 + STATUS_RUNNING = 2 + STATUS_STOPPING = 3 + STATUS_STOPPED = 4 + + @classmethod + def runClient(cls): + clientManager = ClientManager() + return clientManager.run() + + @classmethod + def runService(cls): + svcManager = SvcManager() + svcManager.run() + + @classmethod + def runTemp(cls): # for debugging purposes + # # Hack to exercise reading from disk, imcreasing coverage. TODO: fix + # dbc = dbState.getDbConn() + # sTbName = dbState.getFixedSuperTableName() + # dbc.execute("create database if not exists db") + # if not dbState.getState().equals(StateEmpty()): + # dbc.execute("use db") + + # rTables = None + # try: # the super table may not exist + # sql = "select TBNAME from db.{}".format(sTbName) + # logger.info("Finding out tables in super table: {}".format(sql)) + # dbc.query(sql) # TODO: analyze result set later + # logger.info("Fetching result") + # rTables = dbc.getQueryResult() + # logger.info("Result: {}".format(rTables)) + # except taos.error.ProgrammingError as err: + # logger.info("Initial Super table OPS error: {}".format(err)) + + # # sys.exit() + # if ( not rTables == None): + # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) + # try: + # for rTbName in rTables : # regular tables + # ds = dbState + # logger.info("Inserting into table: {}".format(rTbName[0])) + # sql = "insert into db.{} values ('{}', {});".format( + # rTbName[0], + # ds.getNextTick(), ds.getNextInt()) + # dbc.execute(sql) + # for rTbName in rTables : # regular tables + # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure + # logger.info("Initial READING operation is successful") + # except taos.error.ProgrammingError as err: + # logger.info("Initial WRITE/READ error: {}".format(err)) + + # Sandbox testing code + # dbc = dbState.getDbConn() + # while True: + # rows = dbc.query("show databases") + # print("Rows: {}, time={}".format(rows, time.time())) + return + def main(): - # Super cool Python argument library: https://docs.python.org/3/library/argparse.html + # Super cool Python argument library: + # https://docs.python.org/3/library/argparse.html parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\ @@ -1308,93 +2393,92 @@ def main(): 2. 
You run the server there before this script: ./build/bin/taosd -c test/cfg ''')) - parser.add_argument('-d', '--debug', action='store_true', - help='Turn on DEBUG mode for more logging (default: false)') - parser.add_argument('-l', '--larger-data', action='store_true', - help='Write larger amount of data during write operations (default: false)') - parser.add_argument('-p', '--per-thread-db-connection', action='store_true', - help='Use a single shared db connection (default: false)') - parser.add_argument('-r', '--record-ops', action='store_true', - help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - parser.add_argument('-s', '--max-steps', action='store', default=100, type=int, - help='Maximum number of steps to run (default: 100)') - parser.add_argument('-t', '--num-threads', action='store', default=10, type=int, - help='Number of threads to run (default: 10)') + + parser.add_argument( + '-a', + '--auto-start-service', + action='store_true', + help='Automatically start/stop the TDengine service (default: false)') + parser.add_argument( + '-c', + '--connector-type', + action='store', + default='native', + type=str, + help='Connector type to use: native, rest, or mixed (default: 10)') + parser.add_argument( + '-d', + '--debug', + action='store_true', + help='Turn on DEBUG mode for more logging (default: false)') + parser.add_argument( + '-e', + '--run-tdengine', + action='store_true', + help='Run TDengine service in foreground (default: false)') + parser.add_argument( + '-l', + '--larger-data', + action='store_true', + help='Write larger amount of data during write operations (default: false)') + parser.add_argument( + '-p', + '--per-thread-db-connection', + action='store_true', + help='Use a single shared db connection (default: false)') + parser.add_argument( + '-r', + '--record-ops', + action='store_true', + help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') + parser.add_argument( + '-s', + '--max-steps', + action='store', + default=1000, + type=int, + help='Maximum number of steps to run (default: 100)') + parser.add_argument( + '-t', + '--num-threads', + action='store', + default=5, + type=int, + help='Number of threads to run (default: 10)') + parser.add_argument( + '-x', + '--continue-on-exception', + action='store_true', + help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') global gConfig gConfig = parser.parse_args() - if len(sys.argv) == 1: - parser.print_help() - sys.exit() + # Logging Stuff global logger - logger = logging.getLogger('CrashGen') - logger.addFilter(LoggingFilter()) - if ( gConfig.debug ): - logger.setLevel(logging.DEBUG) # default seems to be INFO + _logger = logging.getLogger('CrashGen') # real logger + _logger.addFilter(LoggingFilter()) + ch = logging.StreamHandler() + _logger.addHandler(ch) + + # Logging adapter, to be used as a logger + logger = MyLoggingAdapter(_logger, []) + + if (gConfig.debug): + logger.setLevel(logging.DEBUG) # default seems to be INFO else: logger.setLevel(logging.INFO) - ch = logging.StreamHandler() - logger.addHandler(ch) - # resetDb = False # DEBUG only - # dbState = DbState(resetDb) # DBEUG only! - dbManager = DbManager() # Regular function - Dice.seed(0) # initial seeding of dice - tc = ThreadCoordinator( - ThreadPool(gConfig.num_threads, gConfig.max_steps), - # WorkDispatcher(dbState), # Obsolete? 
- dbManager - ) + Dice.seed(0) # initial seeding of dice - # # Hack to exercise reading from disk, imcreasing coverage. TODO: fix - # dbc = dbState.getDbConn() - # sTbName = dbState.getFixedSuperTableName() - # dbc.execute("create database if not exists db") - # if not dbState.getState().equals(StateEmpty()): - # dbc.execute("use db") + # Run server or client + if gConfig.run_tdengine: # run server + MainExec.runService() + else: + return MainExec.runClient() - # rTables = None - # try: # the super table may not exist - # sql = "select TBNAME from db.{}".format(sTbName) - # logger.info("Finding out tables in super table: {}".format(sql)) - # dbc.query(sql) # TODO: analyze result set later - # logger.info("Fetching result") - # rTables = dbc.getQueryResult() - # logger.info("Result: {}".format(rTables)) - # except taos.error.ProgrammingError as err: - # logger.info("Initial Super table OPS error: {}".format(err)) - - # # sys.exit() - # if ( not rTables == None): - # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) - # try: - # for rTbName in rTables : # regular tables - # ds = dbState - # logger.info("Inserting into table: {}".format(rTbName[0])) - # sql = "insert into db.{} values ('{}', {});".format( - # rTbName[0], - # ds.getNextTick(), ds.getNextInt()) - # dbc.execute(sql) - # for rTbName in rTables : # regular tables - # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure - # logger.info("Initial READING operation is successful") - # except taos.error.ProgrammingError as err: - # logger.info("Initial WRITE/READ error: {}".format(err)) - - - - # Sandbox testing code - # dbc = dbState.getDbConn() - # while True: - # rows = dbc.query("show databases") - # print("Rows: {}, time={}".format(rows, time.time())) - - tc.run() - tc.logStats() - dbManager.cleanUp() - - # logger.info("Crash_Gen execution finished") if __name__ == "__main__": - main() + exitCode = main() + # print("Exiting with code: {}".format(exitCode)) + sys.exit(exitCode) diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index c845b39764..f6be6aae49 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -31,11 +31,22 @@ then exit -1 fi +CURR_DIR=`pwd` +IN_TDINTERNAL="community" +if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then + TAOS_DIR=$CURR_DIR/../../.. +else + TAOS_DIR=$CURR_DIR/../.. +fi +TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1` + +LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib + # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 # Then let us set up the library path so that our compiled SO file can be loaded by Python -export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR # Now we are all let, and let's see if we can find a crash. 
Note we pass all params -./crash_gen.py $@ +python3 ./crash_gen.py $@ diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 066dda5d97..5ee33c421e 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -1,7 +1,6 @@ #!/bin/bash ulimit -c unlimited -python3 ./test.py -f client/client.py python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/int.py python3 ./test.py -f insert/float.py @@ -122,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py python3 ./test.py -f import_merge/importTPORestart.py python3 ./test.py -f import_merge/importTRestart.py python3 ./test.py -f import_merge/importInsertThenImport.py - +python3 ./test.py -f import_merge/importCSV.py # user python3 ./test.py -f user/user_create.py python3 ./test.py -f user/pass_len.py @@ -146,8 +145,18 @@ python3 ./test.py -f query/queryJoin.py python3 ./test.py -f query/select_last_crash.py #stream +python3 ./test.py -f stream/metric_1.py +python3 ./test.py -f stream/new.py python3 ./test.py -f stream/stream1.py python3 ./test.py -f stream/stream2.py +python3 ./test.py -f stream/parser.py #alter table python3 ./test.py -f alter/alter_table_crash.py + +# client +python3 ./test.py -f client/client.py + +# Misc +python3 testCompress.py +python3 testNoCompress.py diff --git a/tests/pytest/import_merge/importCSV.py b/tests/pytest/import_merge/importCSV.py new file mode 100644 index 0000000000..b4441949a1 --- /dev/null +++ b/tests/pytest/import_merge/importCSV.py @@ -0,0 +1,94 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import csv +import random +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.csvfile = "/tmp/file.csv" + self.rows = 10000 + self.ntables = 1 + self.startTime = 1520000010000 + def genRandomStr(self, maxLen): + H = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789' + salt = '' + if maxLen <= 1: + maxLen = 2 + l = random.randint(1,maxLen) + for i in range(l): + salt += random.choice(H) + return salt + def createCSVFile(self): + f = open(self.csvfile,'w',encoding='utf-8') + csv_writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC) + for i in range(self.rows): + csv_writer.writerow([self.startTime + i, + self.genRandomStr(5), + self.genRandomStr(6), + self.genRandomStr(7), + self.genRandomStr(8), + self.genRandomStr(9), + self.genRandomStr(10), + self.genRandomStr(11), + self.genRandomStr(12), + self.genRandomStr(13), + self.genRandomStr(14)]) + f.close() + def destroyCSVFile(self): + os.remove(self.csvfile) + def run(self): + self.createCSVFile() + + tdDnodes.stop(1) + tdDnodes.deploy(1) + tdDnodes.start(1) + + tdSql.execute('reset query cache') + tdSql.execute('drop database if exists db') + tdSql.execute('create database db') + tdSql.execute('use db') + tdSql.execute('''create table tbx (ts TIMESTAMP, + collect_area NCHAR(5), + device_id BINARY(6), + imsi BINARY(7), + imei BINARY(8), + 
mdn BINARY(9), + net_type BINARY(10), + mno NCHAR(11), + province NCHAR(12), + city NCHAR(13), + alarm BINARY(14))''') + + tdSql.execute("import into tbx file \'%s\'"%(self.csvfile)) + tdSql.query('select * from tbx') + tdSql.checkRows(self.rows) + + def stop(self): + self.destroyCSVFile() + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + tdDnodes.stop(1) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/writeDBNonStop.py b/tests/pytest/insert/writeDBNonStop.py index c89853ffb6..bdc93f0469 100644 --- a/tests/pytest/insert/writeDBNonStop.py +++ b/tests/pytest/insert/writeDBNonStop.py @@ -67,7 +67,7 @@ class DBWriteNonStop: self.cursor.execute( "select first(ts), last(ts), min(speed), max(speed), avg(speed), count(*) from st") data = self.cursor.fetchall() - end = datetime.now() + end = datetime.now() self.writeDataToCSVFile(data, (end - start).seconds) time.sleep(.001) @@ -75,8 +75,9 @@ class DBWriteNonStop: self.cursor.close() self.conn.close() + test = DBWriteNonStop() test.connectDB() test.createTable() test.insertData() -test.closeConn() \ No newline at end of file +test.closeConn() diff --git a/tests/pytest/query/filterAllIntTypes.py b/tests/pytest/query/filterAllIntTypes.py index a2bab63c88..4d91168dae 100644 --- a/tests/pytest/query/filterAllIntTypes.py +++ b/tests/pytest/query/filterAllIntTypes.py @@ -89,17 +89,25 @@ class TDTestCase: tdSql.checkRows(101) # range for int type on column - tdSql.query("select * from st%s where num > 50 and num < 100" % curType) - tdSql.checkRows(49) + tdSql.query( + "select * from st%s where num > 50 and num < 100" % + curType) + tdSql.checkRows(49) - tdSql.query("select * from st%s where num >= 50 and num < 100" % curType) - tdSql.checkRows(50) + tdSql.query( + "select * from st%s where num >= 50 and num < 100" % + curType) + tdSql.checkRows(50) - tdSql.query("select * from st%s where num > 50 and num <= 100" % curType) - tdSql.checkRows(50) + tdSql.query( + "select * from st%s where num > 50 and num <= 100" % + curType) + tdSql.checkRows(50) - tdSql.query("select * from st%s where num >= 50 and num <= 100" % curType) - tdSql.checkRows(51) + tdSql.query( + "select * from st%s where num >= 50 and num <= 100" % + curType) + tdSql.checkRows(51) # > for int type on tag tdSql.query("select * from st%s where id > 5" % curType) @@ -135,16 +143,22 @@ class TDTestCase: # range for int type on tag tdSql.query("select * from st%s where id > 5 and id < 7" % curType) - tdSql.checkRows(10) + tdSql.checkRows(10) - tdSql.query("select * from st%s where id >= 5 and id < 7" % curType) - tdSql.checkRows(20) + tdSql.query( + "select * from st%s where id >= 5 and id < 7" % + curType) + tdSql.checkRows(20) - tdSql.query("select * from st%s where id > 5 and id <= 7" % curType) - tdSql.checkRows(20) + tdSql.query( + "select * from st%s where id > 5 and id <= 7" % + curType) + tdSql.checkRows(20) - tdSql.query("select * from st%s where id >= 5 and id <= 7" % curType) - tdSql.checkRows(30) + tdSql.query( + "select * from st%s where id >= 5 and id <= 7" % + curType) + tdSql.checkRows(30) print( "======= Verify filter for %s type finished =========" % diff --git a/tests/pytest/query/filterCombo.py b/tests/pytest/query/filterCombo.py index f72b913c92..e769addb52 100644 --- a/tests/pytest/query/filterCombo.py +++ b/tests/pytest/query/filterCombo.py @@ -52,8 +52,7 @@ class TDTestCase: # illegal condition tdSql.error( - "select * from db.st where ts != '2020-05-13 10:00:00.002' OR 
tagtype < 2") - tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2") + "select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2") def stop(self): tdSql.close() diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py index 6d135e1006..6ea240a334 100644 --- a/tests/pytest/query/queryJoin.py +++ b/tests/pytest/query/queryJoin.py @@ -77,7 +77,7 @@ class TDTestCase: # join queries tdSql.query( "select * from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") - tdSql.checkRows(6) + tdSql.checkRows(6) tdSql.error( "select ts, pressure, temperature, id, dscrption from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") @@ -108,7 +108,7 @@ class TDTestCase: tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") tdSql.checkRows(6) - + tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id") tdSql.checkRows(6) diff --git a/tests/pytest/query/queryMetaData.py b/tests/pytest/query/queryMetaData.py index 7b95e4a81c..67df20cb9a 100755 --- a/tests/pytest/query/queryMetaData.py +++ b/tests/pytest/query/queryMetaData.py @@ -55,9 +55,9 @@ class MetadataQuery: def createTablesAndInsertData(self, threadID): cursor = self.connectDB() - cursor.execute("use test") + cursor.execute("use test") - tablesPerThread = int (self.tables / self.numOfTherads) + tablesPerThread = int(self.tables / self.numOfTherads) base = threadID * tablesPerThread for i in range(tablesPerThread): cursor.execute( @@ -68,24 +68,72 @@ class MetadataQuery: %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' % - (base + i + 1, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100)) - + (base + i + 1, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, 
(base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100)) + cursor.execute( "insert into t%d values(%d, 1) (%d, 2) (%d, 3) (%d, 4) (%d, 5)" % (base + i + 1, self.ts + 1, self.ts + 2, self.ts + 3, self.ts + 4, self.ts + 5)) - cursor.close() + cursor.close() def queryData(self, query): cursor = self.connectDB() cursor.execute("use test") - print("================= query tag data =================") + print("================= query tag data =================") startTime = datetime.now() cursor.execute(query) cursor.fetchall() @@ -107,15 +155,15 @@ if __name__ == '__main__': print( "================= Create %d tables and insert %d records into each table =================" % (t.tables, t.records)) - startTime = datetime.now() + startTime = datetime.now() threads = [] for i in range(t.numOfTherads): thread = threading.Thread( target=t.createTablesAndInsertData, args=(i,)) thread.start() threads.append(thread) - - for th in threads: + + for th in threads: th.join() endTime = datetime.now() diff --git a/tests/pytest/query/queryMetaPerformace.py b/tests/pytest/query/queryMetaPerformace.py index 0570311b08..8ab7105c9d 100644 --- a/tests/pytest/query/queryMetaPerformace.py +++ b/tests/pytest/query/queryMetaPerformace.py @@ -19,6 +19,7 @@ import time from datetime import datetime import numpy as np + class MyThread(threading.Thread): def __init__(self, func, args=()): @@ -35,17 +36,23 @@ class MyThread(threading.Thread): except Exception: return None + class MetadataQuery: def initConnection(self): self.tables = 100 self.records = 10 - self.numOfTherads =5 + self.numOfTherads = 5 self.ts = 1537146000000 self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" self.config = "/etc/taos" - self.conn = taos.connect( self.host, self.user, self.password, self.config) + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + def connectDB(self): return self.conn.cursor() @@ -69,7 +76,7 @@ class MetadataQuery: cursor.execute("use test") base = threadID * self.tables - tablesPerThread = int (self.tables / self.numOfTherads) + tablesPerThread = int(self.tables / self.numOfTherads) for i in range(tablesPerThread): cursor.execute( '''create table t%d using meters tags( @@ -79,20 +86,69 @@ class MetadataQuery: %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' % - (base + i + 1, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, 
(base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100, - (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100)) + (base + i + 1, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100, (base + i) % + 100, (base + i) % + 10000, (base + i) % + 1000000, (base + i) % + 100000000, (base + i) % + 100 * 1.1, (base + i) % + 100 * 2.3, (base + i) % + 2, (base + i) % + 100, (base + i) % + 100)) for j in range(self.records): cursor.execute( "insert into t%d values(%d, %d)" % (base + i + 1, self.ts + j, j)) cursor.close() - def queryWithTagId(self, threadId, tagId, queryNum): - print("---------thread%d start-----------"%threadId) + + def queryWithTagId(self, threadId, tagId, queryNum): + print("---------thread%d start-----------" % threadId) query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9, tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18, tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27, @@ -103,18 +159,19 @@ class MetadataQuery: latancy = [] cursor = self.connectDB() cursor.execute("use test") - for i in range(queryNum): + for i in range(queryNum): startTime = time.time() - cursor.execute(query.format(id = tagId, condition = i)) + cursor.execute(query.format(id=tagId, condition=i)) cursor.fetchall() - latancy.append((time.time() - startTime)) - print("---------thread%d end-----------"%threadId) + latancy.append((time.time() - startTime)) + print("---------thread%d end-----------" % threadId) return latancy + def queryData(self, query): cursor = self.connectDB() cursor.execute("use test") - print("================= query tag data =================") + print("================= query tag data =================") startTime = datetime.now() cursor.execute(query) cursor.fetchall() @@ -124,7 +181,7 @@ class MetadataQuery: (endTime - startTime).seconds) cursor.close() - #self.conn.close() + # self.conn.close() if __name__ == '__main__': @@ -132,18 +189,33 @@ if __name__ == '__main__': t = MetadataQuery() t.initConnection() - latancys = [] - threads = [] + latancys = [] + threads = [] tagId = 1 - queryNum = 1000 + queryNum = 
1000 for i in range(t.numOfTherads): - thread = MyThread(t.queryWithTagId, args = (i, tagId, queryNum)) - threads.append(thread) + thread = MyThread(t.queryWithTagId, args=(i, tagId, queryNum)) + threads.append(thread) thread.start() - for i in range(t.numOfTherads): + for i in range(t.numOfTherads): threads[i].join() - latancys.extend(threads[i].get_result()) - print("Total query: %d"%(queryNum * t.numOfTherads)) - print("statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f" - %(sum(latancys)/(queryNum * t.numOfTherads), np.percentile(latancys, 50), np.percentile(latancys, 75), np.percentile(latancys, 95), np.percentile(latancys, 99))) - + latancys.extend(threads[i].get_result()) + print("Total query: %d" % (queryNum * t.numOfTherads)) + print( + "statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f" % + (sum(latancys) / + ( + queryNum * + t.numOfTherads), + np.percentile( + latancys, + 50), + np.percentile( + latancys, + 75), + np.percentile( + latancys, + 95), + np.percentile( + latancys, + 99))) diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py index 814c627d89..712a56d2d7 100644 --- a/tests/pytest/query/queryNormal.py +++ b/tests/pytest/query/queryNormal.py @@ -36,18 +36,17 @@ class TDTestCase: "insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)") # inner join --- bug - tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts") - tdSql.checkRows(1) + tdSql.error("select * from tb1 a, tb2 b where a.ts = b.ts") # join 3 tables -- bug exists - tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id") + tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id") # query show stable tdSql.query("show stables") tdSql.checkRows(1) # query show tables - tdSql.query("show table") + tdSql.query("show tables") tdSql.checkRows(2) # query count @@ -71,16 +70,13 @@ class TDTestCase: tdSql.checkRows(2) # query first ... as - tdSql.query("select first(*) as begin from stb1") - tdSql.checkData(0, 1, 1) + tdSql.error("select first(*) as begin from stb1") # query last ... as - tdSql.query("select last(*) as end from stb1") - tdSql.checkData(0, 1, 4) + tdSql.error("select last(*) as end from stb1") # query last_row ... as - tdSql.query("select last_row(*) as end from stb1") - tdSql.checkData(0, 1, 4) + tdSql.error("select last_row(*) as end from stb1") # query group .. by tdSql.query("select sum(c1), t2 from stb1 group by t2") @@ -95,8 +91,7 @@ class TDTestCase: tdSql.checkRows(1) # query ... alias for table ---- bug - tdSql.query("select t.ts from tb1 t") - tdSql.checkRows(2) + tdSql.error("select t.ts from tb1 t") # query ... tbname tdSql.query("select tbname from stb1") @@ -104,7 +99,7 @@ class TDTestCase: # query ... tbname count ---- bug tdSql.query("select count(tbname) from stb1") - tdSql.checkRows(2) + tdSql.checkData(0, 0, 2) # query ... 
select database ---- bug tdSql.query("SELECT database()") diff --git a/tests/pytest/query/select_last_crash.py b/tests/pytest/query/select_last_crash.py index 9aeb122f82..9b580a24ac 100644 --- a/tests/pytest/query/select_last_crash.py +++ b/tests/pytest/query/select_last_crash.py @@ -23,7 +23,6 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - self.rowNum = 5000 self.ts = 1537146000000 @@ -36,15 +35,13 @@ class TDTestCase: "create table t1 using st tags('dev_001')") for i in range(self.rowNum): - tdSql.execute("insert into t1 values(%d, 'taosdata%d', %d)" % (self.ts + i, i + 1, i + 1)) + tdSql.execute( + "insert into t1 values(%d, 'taosdata%d', %d)" % + (self.ts + i, i + 1, i + 1)) tdSql.query("select last(*) from st") tdSql.checkRows(1) - - print( - "======= Verify filter for %s type finished =========" % - curType) - + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/random-test/random-test-multi-threading-3.py b/tests/pytest/random-test/random-test-multi-threading-3.py index 7079a5c118..a8e2c26ae5 100644 --- a/tests/pytest/random-test/random-test-multi-threading-3.py +++ b/tests/pytest/random-test/random-test-multi-threading-3.py @@ -330,7 +330,6 @@ class Test (Thread): self.q.put(-1) tdLog.exit("second thread failed, first thread exit too") - elif (self.threadId == 2): while True: self.dbEvent.wait() diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index eada5f67f7..ccc6635ced 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -1,7 +1,6 @@ #!/bin/bash ulimit -c unlimited -python3 ./test.py -f client/client.py python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/int.py python3 ./test.py -f insert/float.py @@ -122,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py python3 ./test.py -f import_merge/importTPORestart.py python3 ./test.py -f import_merge/importTRestart.py python3 ./test.py -f import_merge/importInsertThenImport.py - +python3 ./test.py -f import_merge/importCSV.py # user python3 ./test.py -f user/user_create.py python3 ./test.py -f user/pass_len.py @@ -138,6 +137,9 @@ python3 ./test.py -f query/filterOtherTypes.py python3 ./test.py -f query/queryError.py python3 ./test.py -f query/querySort.py python3 ./test.py -f query/queryJoin.py +python3 ./test.py -f query/filterCombo.py +python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/select_last_crash.py #stream python3 ./test.py -f stream/stream1.py @@ -146,4 +148,9 @@ python3 ./test.py -f stream/stream2.py #alter table python3 ./test.py -f alter/alter_table_crash.py +# client +python3 ./test.py -f client/client.py +# Misc +python3 testCompress.py +python3 testNoCompress.py diff --git a/tests/pytest/smoketest.sh b/tests/pytest/smoketest.sh index 6b21912dd5..7c14b673e5 100755 --- a/tests/pytest/smoketest.sh +++ b/tests/pytest/smoketest.sh @@ -1,10 +1,6 @@ #!/bin/bash ulimit -c unlimited -# client -python3 ./test.py $1 -f client/client.py -python3 ./test.py $1 -s && sleep 1 - # insert python3 ./test.py $1 -f insert/basic.py python3 ./test.py $1 -s && sleep 1 @@ -35,3 +31,7 @@ python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f query/filter.py python3 ./test.py $1 -s && sleep 1 +# client +python3 ./test.py $1 -f client/client.py +python3 ./test.py $1 -s && sleep 1 + diff --git a/tests/pytest/stream/metric_1.py b/tests/pytest/stream/metric_1.py new file mode 100644 index 0000000000..b4cccac69c --- /dev/null +++ 
b/tests/pytest/stream/metric_1.py @@ -0,0 +1,104 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def createFuncStream(self, expr, suffix, value): + tbname = "strm_" + suffix + tdLog.info("create stream table %s" % tbname) + tdSql.query("select %s from stb interval(1d)" % expr) + tdSql.checkData(0, 1, value) + tdSql.execute("create table %s as select %s from stb interval(1d)" % (tbname, expr)) + + def checkStreamData(self, suffix, value): + sql = "select * from strm_" + suffix + tdSql.waitedQuery(sql, 1, 120) + tdSql.checkData(0, 1, value) + + def run(self): + tbNum = 10 + rowNum = 20 + + tdSql.prepare() + + tdLog.info("===== preparing data =====") + tdSql.execute( + "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)") + for i in range(tbNum): + tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) + for j in range(rowNum): + tdSql.execute( + "insert into tb%d values (now - %dm, %d, %d)" % + (i, 1440 - j, j, j)) + time.sleep(0.1) + + self.createFuncStream("count(*)", "c1", 200) + self.createFuncStream("count(tbcol)", "c2", 200) + self.createFuncStream("count(tbcol2)", "c3", 200) + self.createFuncStream("avg(tbcol)", "av", 9.5) + self.createFuncStream("sum(tbcol)", "su", 1900) + self.createFuncStream("min(tbcol)", "mi", 0) + self.createFuncStream("max(tbcol)", "ma", 19) + self.createFuncStream("first(tbcol)", "fi", 0) + self.createFuncStream("last(tbcol)", "la", 19) + #tdSql.query("select stddev(tbcol) from stb interval(1d)") + #tdSql.query("select leastsquares(tbcol, 1, 1) from stb interval(1d)") + tdSql.query("select top(tbcol, 1) from stb interval(1d)") + tdSql.query("select bottom(tbcol, 1) from stb interval(1d)") + #tdSql.query("select percentile(tbcol, 1) from stb interval(1d)") + #tdSql.query("select diff(tbcol) from stb interval(1d)") + + tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d)") + tdSql.checkData(0, 1, 200) + #tdSql.execute("create table strm_wh as select count(tbcol) from stb where ts < now + 4m interval(1d)") + + self.createFuncStream("count(tbcol)", "as", 200) + + tdSql.query("select count(tbcol) from stb interval(1d) group by tgcol") + tdSql.checkData(0, 1, 20) + + tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d) group by tgcol") + tdSql.checkData(0, 1, 20) + + self.checkStreamData("c1", 200) + self.checkStreamData("c2", 200) + self.checkStreamData("c3", 200) + self.checkStreamData("av", 9.5) + self.checkStreamData("su", 1900) + self.checkStreamData("mi", 0) + self.checkStreamData("ma", 19) + self.checkStreamData("fi", 0) + self.checkStreamData("la", 19) + #self.checkStreamData("wh", 200) + self.checkStreamData("as", 200) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) 
+tdCases.addLinux(__file__, TDTestCase()) + + diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py new file mode 100644 index 0000000000..b8503f0b4e --- /dev/null +++ b/tests/pytest/stream/new.py @@ -0,0 +1,71 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + rowNum = 200 + totalNum = 200 + tdSql.prepare() + + tdLog.info("=============== step1") + tdSql.execute("create table mt(ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)") + for i in range(5): + tdSql.execute("create table tb%d using mt tags(%d)" % (i, i)) + for j in range(rowNum): + tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) + time.sleep(0.1) + + tdLog.info("=============== step2") + tdSql.query("select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") + tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") + + tdLog.info("=============== step3") + tdSql.waitedQuery("select * from st", 1, 120) + v = tdSql.getData(0, 3) + if v >= 51: + tdLog.exit("value is %d, which is larger than 51" % v) + + tdLog.info("=============== step4") + for i in range(5, 10): + tdSql.execute("create table tb%d using mt tags(%d)" % (i, i)) + for j in range(rowNum): + tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) + + tdLog.info("=============== step5") + tdLog.sleep(30) + tdSql.waitedQuery("select * from st order by ts desc", 1, 120) + v = tdSql.getData(0, 3) + if v <= 51: + tdLog.exit("value is %d, which is smaller than 51" % v) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + diff --git a/tests/pytest/stream/parser.py b/tests/pytest/stream/parser.py new file mode 100644 index 0000000000..3b231d2b39 --- /dev/null +++ b/tests/pytest/stream/parser.py @@ -0,0 +1,182 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + ''' + def bug2222(self): + tdSql.prepare() + tdSql.execute("create table superreal(ts timestamp, addr binary(5), val float) tags (deviceNo binary(20))") + tdSql.execute("create table real_001 using superreal tags('001')") + tdSql.execute("create table tj_001 as select sum(val) from real_001 interval(1m)") + + t = datetime.datetime.now() + for i in range(60): + ts = t.strftime("%Y-%m-%d %H:%M") + t += datetime.timedelta(minutes=1) + sql = "insert into real_001 values('%s:0%d', '1', %d)" % (ts, 0, i) + for j in range(4): + sql += ",('%s:0%d', '%d', %d)" % (ts, j + 1, j + 1, i) + tdSql.execute(sql) + time.sleep(60 + random.random() * 60 - 30) + ''' + + def tbase300(self): + tdLog.debug("begin tbase300") + + tdSql.prepare() + tdSql.execute("create table mt(ts timestamp, c1 int, c2 int) tags(t1 int)") + tdSql.execute("create table tb1 using mt tags(1)"); + tdSql.execute("create table tb2 using mt tags(2)"); + tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s)") + #tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2), first(c1) from mt interval(4s) sliding(2s)") + tdLog.sleep(10) + tdSql.execute("insert into tb2 values(now, 1, 1)"); + tdSql.execute("insert into tb1 values(now, 1, 1)"); + tdLog.sleep(4) + tdSql.query("select * from mt") + tdSql.query("select * from strm") + tdSql.execute("drop table tb1") + + tdSql.waitedQuery("select * from strm", 1, 100) + if tdSql.queryRows < 1 or tdSql.queryRows > 2: + tdLog.exit("rows should be 1 or 2") + + tdSql.execute("drop table tb2") + tdSql.execute("drop table mt") + tdSql.execute("drop table strm") + + def tbase304(self): + tdLog.debug("begin tbase304") + # we cannot reset query cache in server side, as a workaround, + # set super table name to mt304, need to change back to mt later + tdSql.execute("create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int)") + tdSql.execute("create table tb1 using mt304 tags(1, 1)") + tdSql.execute("create table tb2 using mt304 tags(1, -1)") + time.sleep(0.1) + tdSql.execute("create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s)") + tdSql.execute("insert into tb1 values (now,1)") + tdSql.execute("insert into tb2 values (now,2)") + + tdSql.waitedQuery("select * from strm", 1, 100) + if tdSql.queryRows < 1 or tdSql.queryRows > 2: + tdLog.exit("rows should be 1 or 2") + + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1.000000000) + tdSql.execute("alter table mt304 drop tag t2") + tdSql.execute("insert into tb2 values (now,2)") + tdSql.execute("insert into tb1 values (now,1)") + tdSql.query("select * from strm") + tdSql.execute("alter table mt304 add tag t2 int") + tdLog.sleep(1) + tdSql.query("select * from strm") + + def wildcardFilterOnTags(self): + tdLog.debug("begin wildcardFilterOnTag") + tdSql.prepare() + tdSql.execute("create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 
binary(10))") + tdSql.execute("create table tb1 using stb tags('a1')") + tdSql.execute("create table tb2 using stb tags('b2')") + tdSql.execute("create table tb3 using stb tags('a3')") + tdSql.execute("create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s)") + tdSql.query("describe strm") + tdSql.checkRows(4) + + tdLog.sleep(1) + tdSql.execute("insert into tb1 values (now, 0, 'tb1')") + tdLog.sleep(4) + tdSql.execute("insert into tb2 values (now, 2, 'tb2')") + tdLog.sleep(4) + tdSql.execute("insert into tb3 values (now, 0, 'tb3')") + + tdSql.waitedQuery("select * from strm", 4, 60) + tdSql.checkRows(4) + tdSql.checkData(0, 2, 0.000000000) + if tdSql.getData(0, 3) == 'tb2': + tdLog.exit("unexpected value of data03") + if tdSql.getData(1, 3) == 'tb2': + tdLog.exit("unexpected value of data13") + if tdSql.getData(2, 3) == 'tb2': + tdLog.exit("unexpected value of data23") + if tdSql.getData(3, 3) == 'tb2': + tdLog.exit("unexpected value of data33") + + tdLog.info("add table tb4 to see if stream still works correctly") + # The vnode client needs to refresh metadata cache to allow strm calculate tb4's data. + # But the current refreshing frequency is every 10 min + # commented out the case below to save running time + tdSql.execute("create table tb4 using stb tags('a4')") + tdSql.execute("insert into tb4 values(now, 4, 'tb4')") + tdSql.waitedQuery("select * from strm order by ts desc", 6, 60) + tdSql.checkRows(6) + tdSql.checkData(0, 2, 4) + tdSql.checkData(0, 3, "tb4") + + tdLog.info("change tag values to see if stream still works correctly") + tdSql.execute("alter table tb4 set tag t1='b4'") + tdLog.sleep(3) + tdSql.execute("insert into tb1 values (now, 1, 'tb1_a1')") + tdLog.sleep(4) + tdSql.execute("insert into tb4 values (now, -4, 'tb4_b4')") + tdSql.waitedQuery("select * from strm order by ts desc", 8, 100) + tdSql.checkRows(8) + tdSql.checkData(0, 2, 1) + tdSql.checkData(0, 3, "tb1_a1") + + def datatypes(self): + tdLog.debug("begin data types") + tdSql.prepare() + tdSql.execute("create table stb3 (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15))") + tdSql.execute("create table tb0 using stb3 tags(0, 'tb0')") + tdSql.execute("create table tb1 using stb3 tags(1, 'tb1')") + tdSql.execute("create table tb2 using stb3 tags(2, 'tb2')") + tdSql.execute("create table tb3 using stb3 tags(3, 'tb3')") + tdSql.execute("create table tb4 using stb3 tags(4, 'tb4')") + + tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb3 where ts < now + 30s interval(4s) sliding(2s)") + #tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5) from stb where ts < now + 30s interval(4s) sliding(2s)") + tdLog.sleep(1) + tdSql.execute("insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) ") + + tdSql.waitedQuery("select * from strm0 order by ts desc", 2, 120) + tdSql.checkRows(2) + + tdSql.execute("insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', 
'涛思4', true) ") + tdSql.waitedQuery("select * from strm0 order by ts desc", 4, 120) + tdSql.checkRows(4) + + def run(self): + self.tbase300() + self.tbase304() + self.wildcardFilterOnTags() + self.datatypes() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/stream1.py b/tests/pytest/stream/stream1.py index 86244d29e0..c657379441 100644 --- a/tests/pytest/stream/stream1.py +++ b/tests/pytest/stream/stream1.py @@ -55,10 +55,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 1) tdLog.info("===== step3 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - tdSql.query("select * from s0") - + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 2, rowNum) @@ -82,10 +79,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 1) tdLog.info("===== step7 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - - tdSql.query("select * from s0") + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 2, rowNum) @@ -108,10 +102,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 2) tdLog.info("===== step9 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, rowNum * tbNum) tdSql.checkData(0, 2, rowNum * tbNum) @@ -134,9 +125,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 2) tdLog.info("===== step13 =====") - tdLog.info("sleeping 120 seconds") - time.sleep(120) - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, rowNum * tbNum) tdSql.checkData(0, 2, rowNum * tbNum) diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py index f1932daf47..44882f5972 100644 --- a/tests/pytest/stream/stream2.py +++ b/tests/pytest/stream/stream2.py @@ -53,8 +53,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 1) tdLog.info("===== step3 =====") - time.sleep(120) - tdSql.query("select * from s0") + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) except Exception as e: @@ -81,8 +80,7 @@ class TDTestCase: tdLog.info(repr(e)) tdLog.info("===== step7 =====") - time.sleep(120) - tdSql.query("select * from s0") + tdSql.waitedQuery("select * from s0", 1, 120) try: tdSql.checkData(0, 1, rowNum) tdSql.checkData(0, 2, rowNum) @@ -107,8 +105,7 @@ class TDTestCase: tdSql.checkRows(tbNum + 2) tdLog.info("===== step9 =====") - time.sleep(120) - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, totalNum) tdSql.checkData(0, 2, totalNum) @@ -137,8 +134,7 @@ class TDTestCase: tdLog.info(repr(e)) tdLog.info("===== step13 =====") - time.sleep(120) - tdSql.query("select * from s1") + tdSql.waitedQuery("select * from s1", 1, 120) try: tdSql.checkData(0, 1, totalNum) #tdSql.checkData(0, 2, None) diff --git a/tests/pytest/testCompress.py b/tests/pytest/testCompress.py new file mode 100644 index 0000000000..0f5d9ef3b1 --- /dev/null +++ b/tests/pytest/testCompress.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/linux/python2/ + +# -*- coding: utf-8 -*- +import sys +import getopt +import subprocess +from distutils.log import warn as printf + +from util.log import * +from util.dnodes import * +from util.cases import * +from util.sql import * + +import taos + + +if __name__ == "__main__": + fileName = "all" + deployPath = "" + testCluster = False + valgrind = 0 + logSql = True + stop = 0 + opts, args = getopt.gnu_getopt(sys.argv[1:], 'l:sgh', [ + 'logSql', 'stop', 'valgrind', 'help']) + for key, value in opts: + if key in ['-h', '--help']: + tdLog.printNoPrefix( + 'A collection of test cases written using Python') + tdLog.printNoPrefix('-l logSql Flag') + tdLog.printNoPrefix('-s stop All dnodes') + tdLog.printNoPrefix('-g valgrind Test Flag') + sys.exit(0) + + if key in ['-l', '--logSql']: + if (value.upper() == "TRUE"): + logSql = True + elif (value.upper() == "FALSE"): + logSql = False + else: + tdLog.printNoPrefix("logSql value %s is invalid" % logSql) + sys.exit(0) + + if key in ['-g', '--valgrind']: + valgrind = 1 + + if key in ['-s', '--stop']: + stop = 1 + + if (stop != 0): + if (valgrind == 0): + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output(psCmd, shell=True) + + while(processID): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + for port in range(6030, 6041): + usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port + processID = subprocess.check_output(usePortPID, shell=True) + + if processID: + killCmd = "kill -9 %s" % processID + os.system(killCmd) + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if valgrind: + time.sleep(2) + + tdLog.info('stop All dnodes') + sys.exit(0) + + tdDnodes.init(deployPath) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + + tdDnodes.stopAll() + tdDnodes.addSimExtraCfg("compressMsgSize", "10240") + tdDnodes.deploy(1) + tdDnodes.start(1) + + host = '127.0.0.1' + + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + + tdCases.logSql(logSql) + + conn = taos.connect( + host, + config=tdDnodes.getSimCfgPath()) + + tdSql.init(conn.cursor(), True) + + tdSql.execute("CREATE DATABASE IF NOT EXISTS t10b") + tdSql.execute("USE t10b") + tdSql.execute( + "CREATE TABLE IF NOT EXISTS s_sensor_info (ts TIMESTAMP, temperature INT, humidity FLOAT)") + + start_time = int(round(time.time() * 1000)) + for i in range(1, 1000): + tdSql.execute("IMPORT INTO s_sensor_info VALUES (1575129600000, 16, 19.405090) (1575129601000, 22, 14.377142) (1575129602000, 16, 16.868231) (1575129603000, 20, 11.565193) (1575129604000, 31, 13.009119) (1575129605000, 29, 18.136400) (1575129606000, 17, 13.806572) (1575129607000, 23, 14.688898) (1575129608000, 26, 12.931019) (1575129609000, 32, 12.185531) (1575129610000, 30, 13.608714) (1575129611000, 23, 18.624914) (1575129612000, 22, 12.970826) (1575129613000, 22, 12.065827) (1575129614000, 25, 16.967192) (1575129615000, 16, 10.283031) (1575129616000, 22, 16.072535) (1575129617000, 24, 
10.794536) (1575129618000, 32, 10.591207) (1575129619000, 20, 13.015227) (1575129620000, 28, 15.410999) (1575129621000, 29, 12.785076) (1575129622000, 28, 15.305857) (1575129623000, 33, 12.820810) (1575129624000, 34, 13.618055) (1575129625000, 32, 12.971123) (1575129626000, 24, 10.974546) (1575129627000, 15, 10.742910) (1575129628000, 23, 16.810783) (1575129629000, 18, 13.115224) (1575129630000, 26, 17.418489) (1575129631000, 20, 17.302315) (1575129632000, 21, 14.283571) (1575129633000, 16, 16.826534) (1575129634000, 18, 19.222122) (1575129635000, 18, 14.931420) (1575129636000, 17, 19.549454) (1575129637000, 22, 16.908388) (1575129638000, 32, 15.637796) (1575129639000, 31, 15.517650) (1575129640000, 18, 14.038033) (1575129641000, 32, 19.859648) (1575129642000, 16, 13.220840) (1575129643000, 28, 16.445398) (1575129644000, 26, 16.695753) (1575129645000, 33, 13.696928) (1575129646000, 21, 15.352819) (1575129647000, 15, 12.388407) (1575129648000, 27, 11.267529) (1575129649000, 20, 14.103228) (1575129650000, 20, 16.250950) (1575129651000, 30, 16.236088) (1575129652000, 22, 18.305340) (1575129653000, 25, 17.360685) (1575129654000, 25, 14.978681) (1575129655000, 33, 14.096183) (1575129656000, 26, 10.019039) (1575129657000, 19, 19.158213) (1575129658000, 22, 15.593924) (1575129659000, 26, 18.780119) (1575129660000, 21, 16.001656) (1575129661000, 16, 18.458328) (1575129662000, 21, 16.417843) (1575129663000, 28, 11.736558) (1575129664000, 34, 18.143946) (1575129665000, 27, 10.303225) (1575129666000, 20, 19.756748) (1575129667000, 22, 12.940063) (1575129668000, 23, 11.509640) (1575129669000, 19, 18.319309) (1575129670000, 19, 16.278345) (1575129671000, 27, 10.898361) (1575129672000, 31, 13.922162) (1575129673000, 15, 19.296116) (1575129674000, 26, 15.885763) (1575129675000, 15, 15.525804) (1575129676000, 19, 19.579538) (1575129677000, 20, 11.073811) (1575129678000, 16, 13.932510) (1575129679000, 17, 11.900328) (1575129680000, 22, 16.540415) (1575129681000, 33, 15.203803) (1575129682000, 17, 11.518434) (1575129683000, 17, 13.152081) (1575129684000, 18, 11.378041) (1575129685000, 21, 15.390745) (1575129686000, 30, 15.127818) (1575129687000, 19, 16.530401) (1575129688000, 32, 16.542702) (1575129689000, 26, 16.366442) (1575129690000, 25, 10.306822) (1575129691000, 15, 13.691117) (1575129692000, 15, 13.476817) (1575129693000, 25, 12.529998) (1575129694000, 22, 15.550021) (1575129695000, 20, 15.064971) (1575129696000, 24, 13.313683) (1575129697000, 23, 17.002878) (1575129698000, 30, 19.991594) (1575129699000, 15, 11.116746) (1575129699990, 16, 19.405090) (1575129700990, 22, 14.377142) (1575129701990, 16, 16.868231) (1575129702990, 20, 11.565193) (1575129703990, 31, 13.009119) (1575129704990, 29, 18.136400) (1575129705990, 17, 13.806572) (1575129706990, 23, 14.688898) (1575129707990, 26, 12.931019) (1575129708990, 32, 12.185531) (1575129709990, 30, 13.608714) (1575129710990, 23, 18.624914) (1575129711990, 22, 12.970826) (1575129712990, 22, 12.065827) (1575129713990, 25, 16.967192) (1575129714990, 16, 10.283031) (1575129715990, 22, 16.072535) (1575129716990, 24, 10.794536) (1575129717990, 32, 10.591207) (1575129718990, 20, 13.015227) (1575129719990, 28, 15.410999) (1575129720990, 29, 12.785076) (1575129721990, 28, 15.305857) (1575129722990, 33, 12.820810) (1575129723990, 34, 13.618055) (1575129724990, 32, 12.971123) (1575129725990, 24, 10.974546) (1575129726990, 15, 10.742910) (1575129727990, 23, 16.810783) (1575129728990, 18, 13.115224) (1575129729990, 26, 17.418489) (1575129730990, 20, 17.302315) 
(1575129731990, 21, 14.283571) (1575129732990, 16, 16.826534) (1575129733990, 18, 19.222122) (1575129734990, 18, 14.931420) (1575129735990, 17, 19.549454) (1575129736990, 22, 16.908388) (1575129737990, 32, 15.637796) (1575129738990, 31, 15.517650) (1575129739990, 18, 14.038033) (1575129740990, 32, 19.859648) (1575129741990, 16, 13.220840) (1575129742990, 28, 16.445398) (1575129743990, 26, 16.695753) (1575129744990, 33, 13.696928) (1575129745990, 21, 15.352819) (1575129746990, 15, 12.388407) (1575129747990, 27, 11.267529) (1575129748990, 20, 14.103228) (1575129749990, 20, 16.250950) (1575129750990, 30, 16.236088) (1575129751990, 22, 18.305340) (1575129752990, 25, 17.360685) (1575129753990, 25, 14.978681) (1575129754990, 33, 14.096183) (1575129755990, 26, 10.019039) (1575129756990, 19, 19.158213) (1575129757990, 22, 15.593924) (1575129758990, 26, 18.780119) (1575129759990, 21, 16.001656) (1575129760990, 16, 18.458328) (1575129761990, 21, 16.417843) (1575129762990, 28, 11.736558) (1575129763990, 34, 18.143946) (1575129764990, 27, 10.303225) (1575129765990, 20, 19.756748) (1575129766990, 22, 12.940063) (1575129767990, 23, 11.509640) (1575129768990, 19, 18.319309) (1575129769990, 19, 16.278345) (1575129770990, 27, 10.898361) (1575129771990, 31, 13.922162) (1575129772990, 15, 19.296116) (1575129773990, 26, 15.885763) (1575129774990, 15, 15.525804) (1575129775990, 19, 19.579538) (1575129776990, 20, 11.073811) (1575129777990, 16, 13.932510) (1575129778990, 17, 11.900328) (1575129779990, 22, 16.540415) (1575129780990, 33, 15.203803) (1575129781990, 17, 11.518434) (1575129782990, 17, 13.152081) (1575129783990, 18, 11.378041) (1575129784990, 21, 15.390745) (1575129785990, 30, 15.127818) (1575129786990, 19, 16.530401) (1575129787990, 32, 16.542702) (1575129788990, 26, 16.366442) (1575129789990, 25, 10.306822) (1575129790990, 15, 13.691117) (1575129791990, 15, 13.476817) (1575129792990, 25, 12.529998) (1575129793990, 22, 15.550021) (1575129794990, 20, 15.064971) (1575129795990, 24, 13.313683) (1575129796990, 23, 17.002878) (1575129797990, 30, 19.991594) (1575129798990, 15, 11.116746) (1575129799980, 16, 19.405090) (1575129800980, 22, 14.377142) (1575129801980, 16, 16.868231) (1575129802980, 20, 11.565193) (1575129803980, 31, 13.009119) (1575129804980, 29, 18.136400) (1575129805980, 17, 13.806572) (1575129806980, 23, 14.688898) (1575129807980, 26, 12.931019) (1575129808980, 32, 12.185531) (1575129809980, 30, 13.608714) (1575129810980, 23, 18.624914) (1575129811980, 22, 12.970826) (1575129812980, 22, 12.065827) (1575129813980, 25, 16.967192) (1575129814980, 16, 10.283031) (1575129815980, 22, 16.072535) (1575129816980, 24, 10.794536) (1575129817980, 32, 10.591207) (1575129818980, 20, 13.015227) (1575129819980, 28, 15.410999) (1575129820980, 29, 12.785076) (1575129821980, 28, 15.305857) (1575129822980, 33, 12.820810) (1575129823980, 34, 13.618055) (1575129824980, 32, 12.971123) (1575129825980, 24, 10.974546) (1575129826980, 15, 10.742910) (1575129827980, 23, 16.810783) (1575129828980, 18, 13.115224) (1575129829980, 26, 17.418489) (1575129830980, 20, 17.302315) (1575129831980, 21, 14.283571) (1575129832980, 16, 16.826534) (1575129833980, 18, 19.222122) (1575129834980, 18, 14.931420) (1575129835980, 17, 19.549454) (1575129836980, 22, 16.908388) (1575129837980, 32, 15.637796) (1575129838980, 31, 15.517650) (1575129839980, 18, 14.038033) (1575129840980, 32, 19.859648) (1575129841980, 16, 13.220840) (1575129842980, 28, 16.445398) (1575129843980, 26, 16.695753) (1575129844980, 33, 13.696928) (1575129845980, 21, 
15.352819) (1575129846980, 15, 12.388407) (1575129847980, 27, 11.267529) (1575129848980, 20, 14.103228) (1575129849980, 20, 16.250950) (1575129850980, 30, 16.236088) (1575129851980, 22, 18.305340) (1575129852980, 25, 17.360685) (1575129853980, 25, 14.978681) (1575129854980, 33, 14.096183) (1575129855980, 26, 10.019039) (1575129856980, 19, 19.158213) (1575129857980, 22, 15.593924) (1575129858980, 26, 18.780119) (1575129859980, 21, 16.001656) (1575129860980, 16, 18.458328) (1575129861980, 21, 16.417843) (1575129862980, 28, 11.736558) (1575129863980, 34, 18.143946) (1575129864980, 27, 10.303225) (1575129865980, 20, 19.756748) (1575129866980, 22, 12.940063) (1575129867980, 23, 11.509640) (1575129868980, 19, 18.319309) (1575129869980, 19, 16.278345) (1575129870980, 27, 10.898361) (1575129871980, 31, 13.922162) (1575129872980, 15, 19.296116) (1575129873980, 26, 15.885763) (1575129874980, 15, 15.525804) (1575129875980, 19, 19.579538) (1575129876980, 20, 11.073811) (1575129877980, 16, 13.932510) (1575129878980, 17, 11.900328) (1575129879980, 22, 16.540415) (1575129880980, 33, 15.203803) (1575129881980, 17, 11.518434) (1575129882980, 17, 13.152081) (1575129883980, 18, 11.378041) (1575129884980, 21, 15.390745) (1575129885980, 30, 15.127818) (1575129886980, 19, 16.530401) (1575129887980, 32, 16.542702) (1575129888980, 26, 16.366442) (1575129889980, 25, 10.306822) (1575129890980, 15, 13.691117) (1575129891980, 15, 13.476817) (1575129892980, 25, 12.529998) (1575129893980, 22, 15.550021) (1575129894980, 20, 15.064971) (1575129895980, 24, 13.313683) (1575129896980, 23, 17.002878) (1575129897980, 30, 19.991594) (1575129898980, 15, 11.116746) (1575129899970, 16, 19.405090) (1575129900970, 22, 14.377142) (1575129901970, 16, 16.868231) (1575129902970, 20, 11.565193) (1575129903970, 31, 13.009119) (1575129904970, 29, 18.136400) (1575129905970, 17, 13.806572) (1575129906970, 23, 14.688898) (1575129907970, 26, 12.931019) (1575129908970, 32, 12.185531) (1575129909970, 30, 13.608714) (1575129910970, 23, 18.624914) (1575129911970, 22, 12.970826) (1575129912970, 22, 12.065827) (1575129913970, 25, 16.967192) (1575129914970, 16, 10.283031) (1575129915970, 22, 16.072535) (1575129916970, 24, 10.794536) (1575129917970, 32, 10.591207) (1575129918970, 20, 13.015227) (1575129919970, 28, 15.410999) (1575129920970, 29, 12.785076) (1575129921970, 28, 15.305857) (1575129922970, 33, 12.820810) (1575129923970, 34, 13.618055) (1575129924970, 32, 12.971123) (1575129925970, 24, 10.974546) (1575129926970, 15, 10.742910) (1575129927970, 23, 16.810783) (1575129928970, 18, 13.115224) (1575129929970, 26, 17.418489) (1575129930970, 20, 17.302315) (1575129931970, 21, 14.283571) (1575129932970, 16, 16.826534) (1575129933970, 18, 19.222122) (1575129934970, 18, 14.931420) (1575129935970, 17, 19.549454) (1575129936970, 22, 16.908388) (1575129937970, 32, 15.637796) (1575129938970, 31, 15.517650) (1575129939970, 18, 14.038033) (1575129940970, 32, 19.859648) (1575129941970, 16, 13.220840) (1575129942970, 28, 16.445398) (1575129943970, 26, 16.695753) (1575129944970, 33, 13.696928) (1575129945970, 21, 15.352819) (1575129946970, 15, 12.388407) (1575129947970, 27, 11.267529) (1575129948970, 20, 14.103228) (1575129949970, 20, 16.250950) (1575129950970, 30, 16.236088) (1575129951970, 22, 18.305340) (1575129952970, 25, 17.360685) (1575129953970, 25, 14.978681) (1575129954970, 33, 14.096183) (1575129955970, 26, 10.019039) (1575129956970, 19, 19.158213) (1575129957970, 22, 15.593924) (1575129958970, 26, 18.780119) (1575129959970, 21, 16.001656) 
(1575129960970, 16, 18.458328) (1575129961970, 21, 16.417843) (1575129962970, 28, 11.736558) (1575129963970, 34, 18.143946) (1575129964970, 27, 10.303225) (1575129965970, 20, 19.756748) (1575129966970, 22, 12.940063) (1575129967970, 23, 11.509640) (1575129968970, 19, 18.319309) (1575129969970, 19, 16.278345) (1575129970970, 27, 10.898361) (1575129971970, 31, 13.922162) (1575129972970, 15, 19.296116) (1575129973970, 26, 15.885763) (1575129974970, 15, 15.525804) (1575129975970, 19, 19.579538) (1575129976970, 20, 11.073811) (1575129977970, 16, 13.932510) (1575129978970, 17, 11.900328) (1575129979970, 22, 16.540415) (1575129980970, 33, 15.203803) (1575129981970, 17, 11.518434) (1575129982970, 17, 13.152081) (1575129983970, 18, 11.378041) (1575129984970, 21, 15.390745) (1575129985970, 30, 15.127818) (1575129986970, 19, 16.530401) (1575129987970, 32, 16.542702) (1575129988970, 26, 16.366442) (1575129989970, 25, 10.306822) (1575129990970, 15, 13.691117) (1575129991970, 15, 13.476817) (1575129992970, 25, 12.529998) (1575129993970, 22, 15.550021) (1575129994970, 20, 15.064971) (1575129995970, 24, 13.313683) (1575129996970, 23, 17.002878) (1575129997970, 30, 19.991594) (1575129998970, 15, 11.116746) (1575129999960, 16, 19.405090) (1575130000960, 22, 14.377142) (1575130001960, 16, 16.868231) (1575130002960, 20, 11.565193) (1575130003960, 31, 13.009119) (1575130004960, 29, 18.136400) (1575130005960, 17, 13.806572) (1575130006960, 23, 14.688898) (1575130007960, 26, 12.931019) (1575130008960, 32, 12.185531) (1575130009960, 30, 13.608714) (1575130010960, 23, 18.624914) (1575130011960, 22, 12.970826) (1575130012960, 22, 12.065827) (1575130013960, 25, 16.967192) (1575130014960, 16, 10.283031) (1575130015960, 22, 16.072535) (1575130016960, 24, 10.794536) (1575130017960, 32, 10.591207) (1575130018960, 20, 13.015227) (1575130019960, 28, 15.410999) (1575130020960, 29, 12.785076) (1575130021960, 28, 15.305857) (1575130022960, 33, 12.820810) (1575130023960, 34, 13.618055) (1575130024960, 32, 12.971123) (1575130025960, 24, 10.974546) (1575130026960, 15, 10.742910) (1575130027960, 23, 16.810783) (1575130028960, 18, 13.115224) (1575130029960, 26, 17.418489) (1575130030960, 20, 17.302315) (1575130031960, 21, 14.283571) (1575130032960, 16, 16.826534) (1575130033960, 18, 19.222122) (1575130034960, 18, 14.931420) (1575130035960, 17, 19.549454) (1575130036960, 22, 16.908388) (1575130037960, 32, 15.637796) (1575130038960, 31, 15.517650) (1575130039960, 18, 14.038033) (1575130040960, 32, 19.859648) (1575130041960, 16, 13.220840) (1575130042960, 28, 16.445398) (1575130043960, 26, 16.695753) (1575130044960, 33, 13.696928) (1575130045960, 21, 15.352819) (1575130046960, 15, 12.388407) (1575130047960, 27, 11.267529) (1575130048960, 20, 14.103228) (1575130049960, 20, 16.250950) (1575130050960, 30, 16.236088) (1575130051960, 22, 18.305340) (1575130052960, 25, 17.360685) (1575130053960, 25, 14.978681) (1575130054960, 33, 14.096183) (1575130055960, 26, 10.019039) (1575130056960, 19, 19.158213) (1575130057960, 22, 15.593924) (1575130058960, 26, 18.780119) (1575130059960, 21, 16.001656) (1575130060960, 16, 18.458328) (1575130061960, 21, 16.417843) (1575130062960, 28, 11.736558) (1575130063960, 34, 18.143946) (1575130064960, 27, 10.303225) (1575130065960, 20, 19.756748) (1575130066960, 22, 12.940063) (1575130067960, 23, 11.509640) (1575130068960, 19, 18.319309) (1575130069960, 19, 16.278345) (1575130070960, 27, 10.898361) (1575130071960, 31, 13.922162) (1575130072960, 15, 19.296116) (1575130073960, 26, 15.885763) (1575130074960, 15, 
15.525804) (1575130075960, 19, 19.579538) (1575130076960, 20, 11.073811) (1575130077960, 16, 13.932510) (1575130078960, 17, 11.900328) (1575130079960, 22, 16.540415) (1575130080960, 33, 15.203803) (1575130081960, 17, 11.518434) (1575130082960, 17, 13.152081) (1575130083960, 18, 11.378041) (1575130084960, 21, 15.390745) (1575130085960, 30, 15.127818) (1575130086960, 19, 16.530401) (1575130087960, 32, 16.542702) (1575130088960, 26, 16.366442) (1575130089960, 25, 10.306822) (1575130090960, 15, 13.691117) (1575130091960, 15, 13.476817) (1575130092960, 25, 12.529998) (1575130093960, 22, 15.550021) (1575130094960, 20, 15.064971) (1575130095960, 24, 13.313683) (1575130096960, 23, 17.002878) (1575130097960, 30, 19.991594) (1575130098960, 15, 11.116746) (1575130099950, 16, 19.405090) (1575130100950, 22, 14.377142) (1575130101950, 16, 16.868231) (1575130102950, 20, 11.565193) (1575130103950, 31, 13.009119) (1575130104950, 29, 18.136400) (1575130105950, 17, 13.806572) (1575130106950, 23, 14.688898) (1575130107950, 26, 12.931019) (1575130108950, 32, 12.185531) (1575130109950, 30, 13.608714) (1575130110950, 23, 18.624914) (1575130111950, 22, 12.970826) (1575130112950, 22, 12.065827) (1575130113950, 25, 16.967192) (1575130114950, 16, 10.283031) (1575130115950, 22, 16.072535) (1575130116950, 24, 10.794536) (1575130117950, 32, 10.591207) (1575130118950, 20, 13.015227) (1575130119950, 28, 15.410999) (1575130120950, 29, 12.785076) (1575130121950, 28, 15.305857) (1575130122950, 33, 12.820810) (1575130123950, 34, 13.618055) (1575130124950, 32, 12.971123) (1575130125950, 24, 10.974546) (1575130126950, 15, 10.742910) (1575130127950, 23, 16.810783) (1575130128950, 18, 13.115224) (1575130129950, 26, 17.418489) (1575130130950, 20, 17.302315) (1575130131950, 21, 14.283571) (1575130132950, 16, 16.826534) (1575130133950, 18, 19.222122) (1575130134950, 18, 14.931420) (1575130135950, 17, 19.549454) (1575130136950, 22, 16.908388) (1575130137950, 32, 15.637796) (1575130138950, 31, 15.517650) (1575130139950, 18, 14.038033) (1575130140950, 32, 19.859648) (1575130141950, 16, 13.220840) (1575130142950, 28, 16.445398) (1575130143950, 26, 16.695753) (1575130144950, 33, 13.696928) (1575130145950, 21, 15.352819) (1575130146950, 15, 12.388407) (1575130147950, 27, 11.267529) (1575130148950, 20, 14.103228) (1575130149950, 20, 16.250950) (1575130150950, 30, 16.236088) (1575130151950, 22, 18.305340) (1575130152950, 25, 17.360685) (1575130153950, 25, 14.978681) (1575130154950, 33, 14.096183) (1575130155950, 26, 10.019039) (1575130156950, 19, 19.158213) (1575130157950, 22, 15.593924) (1575130158950, 26, 18.780119) (1575130159950, 21, 16.001656) (1575130160950, 16, 18.458328) (1575130161950, 21, 16.417843) (1575130162950, 28, 11.736558) (1575130163950, 34, 18.143946) (1575130164950, 27, 10.303225) (1575130165950, 20, 19.756748) (1575130166950, 22, 12.940063) (1575130167950, 23, 11.509640) (1575130168950, 19, 18.319309) (1575130169950, 19, 16.278345) (1575130170950, 27, 10.898361) (1575130171950, 31, 13.922162) (1575130172950, 15, 19.296116) (1575130173950, 26, 15.885763) (1575130174950, 15, 15.525804) (1575130175950, 19, 19.579538) (1575130176950, 20, 11.073811) (1575130177950, 16, 13.932510) (1575130178950, 17, 11.900328) (1575130179950, 22, 16.540415) (1575130180950, 33, 15.203803) (1575130181950, 17, 11.518434) (1575130182950, 17, 13.152081) (1575130183950, 18, 11.378041) (1575130184950, 21, 15.390745) (1575130185950, 30, 15.127818) (1575130186950, 19, 16.530401) (1575130187950, 32, 16.542702) (1575130188950, 26, 16.366442) 
(1575130189950, 25, 10.306822) (1575130190950, 15, 13.691117) (1575130191950, 15, 13.476817) (1575130192950, 25, 12.529998) (1575130193950, 22, 15.550021) (1575130194950, 20, 15.064971) (1575130195950, 24, 13.313683) (1575130196950, 23, 17.002878) (1575130197950, 30, 19.991594) (1575130198950, 15, 11.116746) (1575130199940, 16, 19.405090) (1575130200940, 22, 14.377142) (1575130201940, 16, 16.868231) (1575130202940, 20, 11.565193) (1575130203940, 31, 13.009119) (1575130204940, 29, 18.136400) (1575130205940, 17, 13.806572) (1575130206940, 23, 14.688898) (1575130207940, 26, 12.931019) (1575130208940, 32, 12.185531) (1575130209940, 30, 13.608714) (1575130210940, 23, 18.624914) (1575130211940, 22, 12.970826) (1575130212940, 22, 12.065827) (1575130213940, 25, 16.967192) (1575130214940, 16, 10.283031) (1575130215940, 22, 16.072535) (1575130216940, 24, 10.794536) (1575130217940, 32, 10.591207) (1575130218940, 20, 13.015227) (1575130219940, 28, 15.410999) (1575130220940, 29, 12.785076) (1575130221940, 28, 15.305857) (1575130222940, 33, 12.820810) (1575130223940, 34, 13.618055) (1575130224940, 32, 12.971123) (1575130225940, 24, 10.974546) (1575130226940, 15, 10.742910) (1575130227940, 23, 16.810783) (1575130228940, 18, 13.115224) (1575130229940, 26, 17.418489) (1575130230940, 20, 17.302315) (1575130231940, 21, 14.283571) (1575130232940, 16, 16.826534) (1575130233940, 18, 19.222122) (1575130234940, 18, 14.931420) (1575130235940, 17, 19.549454) (1575130236940, 22, 16.908388) (1575130237940, 32, 15.637796) (1575130238940, 31, 15.517650) (1575130239940, 18, 14.038033) (1575130240940, 32, 19.859648) (1575130241940, 16, 13.220840) (1575130242940, 28, 16.445398) (1575130243940, 26, 16.695753) (1575130244940, 33, 13.696928) (1575130245940, 21, 15.352819) (1575130246940, 15, 12.388407) (1575130247940, 27, 11.267529) (1575130248940, 20, 14.103228) (1575130249940, 20, 16.250950) (1575130250940, 30, 16.236088) (1575130251940, 22, 18.305340) (1575130252940, 25, 17.360685) (1575130253940, 25, 14.978681) (1575130254940, 33, 14.096183) (1575130255940, 26, 10.019039) (1575130256940, 19, 19.158213) (1575130257940, 22, 15.593924) (1575130258940, 26, 18.780119) (1575130259940, 21, 16.001656) (1575130260940, 16, 18.458328) (1575130261940, 21, 16.417843) (1575130262940, 28, 11.736558) (1575130263940, 34, 18.143946) (1575130264940, 27, 10.303225) (1575130265940, 20, 19.756748) (1575130266940, 22, 12.940063) (1575130267940, 23, 11.509640) (1575130268940, 19, 18.319309) (1575130269940, 19, 16.278345) (1575130270940, 27, 10.898361) (1575130271940, 31, 13.922162) (1575130272940, 15, 19.296116) (1575130273940, 26, 15.885763) (1575130274940, 15, 15.525804) (1575130275940, 19, 19.579538) (1575130276940, 20, 11.073811) (1575130277940, 16, 13.932510) (1575130278940, 17, 11.900328) (1575130279940, 22, 16.540415) (1575130280940, 33, 15.203803) (1575130281940, 17, 11.518434) (1575130282940, 17, 13.152081) (1575130283940, 18, 11.378041) (1575130284940, 21, 15.390745) (1575130285940, 30, 15.127818) (1575130286940, 19, 16.530401) (1575130287940, 32, 16.542702) (1575130288940, 26, 16.366442) (1575130289940, 25, 10.306822) (1575130290940, 15, 13.691117) (1575130291940, 15, 13.476817) (1575130292940, 25, 12.529998) (1575130293940, 22, 15.550021) (1575130294940, 20, 15.064971) (1575130295940, 24, 13.313683) (1575130296940, 23, 17.002878) (1575130297940, 30, 19.991594) (1575130298940, 15, 11.116746) (1575130299930, 16, 19.405090) (1575130300930, 22, 14.377142) (1575130301930, 16, 16.868231) (1575130302930, 20, 11.565193) (1575130303930, 31, 
13.009119) (1575130304930, 29, 18.136400) (1575130305930, 17, 13.806572) (1575130306930, 23, 14.688898) (1575130307930, 26, 12.931019) (1575130308930, 32, 12.185531) (1575130309930, 30, 13.608714) (1575130310930, 23, 18.624914) (1575130311930, 22, 12.970826) (1575130312930, 22, 12.065827) (1575130313930, 25, 16.967192) (1575130314930, 16, 10.283031) (1575130315930, 22, 16.072535) (1575130316930, 24, 10.794536) (1575130317930, 32, 10.591207) (1575130318930, 20, 13.015227) (1575130319930, 28, 15.410999) (1575130320930, 29, 12.785076) (1575130321930, 28, 15.305857) (1575130322930, 33, 12.820810) (1575130323930, 34, 13.618055) (1575130324930, 32, 12.971123) (1575130325930, 24, 10.974546) (1575130326930, 15, 10.742910) (1575130327930, 23, 16.810783) (1575130328930, 18, 13.115224) (1575130329930, 26, 17.418489) (1575130330930, 20, 17.302315) (1575130331930, 21, 14.283571) (1575130332930, 16, 16.826534) (1575130333930, 18, 19.222122) (1575130334930, 18, 14.931420) (1575130335930, 17, 19.549454) (1575130336930, 22, 16.908388) (1575130337930, 32, 15.637796) (1575130338930, 31, 15.517650) (1575130339930, 18, 14.038033) (1575130340930, 32, 19.859648) (1575130341930, 16, 13.220840) (1575130342930, 28, 16.445398) (1575130343930, 26, 16.695753) (1575130344930, 33, 13.696928) (1575130345930, 21, 15.352819) (1575130346930, 15, 12.388407) (1575130347930, 27, 11.267529) (1575130348930, 20, 14.103228) (1575130349930, 20, 16.250950) (1575130350930, 30, 16.236088) (1575130351930, 22, 18.305340) (1575130352930, 25, 17.360685) (1575130353930, 25, 14.978681) (1575130354930, 33, 14.096183) (1575130355930, 26, 10.019039) (1575130356930, 19, 19.158213) (1575130357930, 22, 15.593924) (1575130358930, 26, 18.780119) (1575130359930, 21, 16.001656) (1575130360930, 16, 18.458328) (1575130361930, 21, 16.417843) (1575130362930, 28, 11.736558) (1575130363930, 34, 18.143946) (1575130364930, 27, 10.303225) (1575130365930, 20, 19.756748) (1575130366930, 22, 12.940063) (1575130367930, 23, 11.509640) (1575130368930, 19, 18.319309) (1575130369930, 19, 16.278345) (1575130370930, 27, 10.898361) (1575130371930, 31, 13.922162) (1575130372930, 15, 19.296116) (1575130373930, 26, 15.885763) (1575130374930, 15, 15.525804) (1575130375930, 19, 19.579538) (1575130376930, 20, 11.073811) (1575130377930, 16, 13.932510) (1575130378930, 17, 11.900328) (1575130379930, 22, 16.540415) (1575130380930, 33, 15.203803) (1575130381930, 17, 11.518434) (1575130382930, 17, 13.152081) (1575130383930, 18, 11.378041) (1575130384930, 21, 15.390745) (1575130385930, 30, 15.127818) (1575130386930, 19, 16.530401) (1575130387930, 32, 16.542702) (1575130388930, 26, 16.366442) (1575130389930, 25, 10.306822) (1575130390930, 15, 13.691117) (1575130391930, 15, 13.476817) (1575130392930, 25, 12.529998) (1575130393930, 22, 15.550021) (1575130394930, 20, 15.064971) (1575130395930, 24, 13.313683) (1575130396930, 23, 17.002878) (1575130397930, 30, 19.991594) (1575130398930, 15, 11.116746) (1575130399920, 16, 19.405090) (1575130400920, 22, 14.377142) (1575130401920, 16, 16.868231) (1575130402920, 20, 11.565193) (1575130403920, 31, 13.009119) (1575130404920, 29, 18.136400) (1575130405920, 17, 13.806572) (1575130406920, 23, 14.688898) (1575130407920, 26, 12.931019) (1575130408920, 32, 12.185531) (1575130409920, 30, 13.608714) (1575130410920, 23, 18.624914) (1575130411920, 22, 12.970826) (1575130412920, 22, 12.065827) (1575130413920, 25, 16.967192) (1575130414920, 16, 10.283031) (1575130415920, 22, 16.072535) (1575130416920, 24, 10.794536) (1575130417920, 32, 10.591207) 
(1575130418920, 20, 13.015227) (1575130419920, 28, 15.410999) (1575130420920, 29, 12.785076) (1575130421920, 28, 15.305857) (1575130422920, 33, 12.820810) (1575130423920, 34, 13.618055) (1575130424920, 32, 12.971123) (1575130425920, 24, 10.974546) (1575130426920, 15, 10.742910) (1575130427920, 23, 16.810783) (1575130428920, 18, 13.115224) (1575130429920, 26, 17.418489) (1575130430920, 20, 17.302315) (1575130431920, 21, 14.283571) (1575130432920, 16, 16.826534) (1575130433920, 18, 19.222122) (1575130434920, 18, 14.931420) (1575130435920, 17, 19.549454) (1575130436920, 22, 16.908388) (1575130437920, 32, 15.637796) (1575130438920, 31, 15.517650) (1575130439920, 18, 14.038033) (1575130440920, 32, 19.859648) (1575130441920, 16, 13.220840) (1575130442920, 28, 16.445398) (1575130443920, 26, 16.695753) (1575130444920, 33, 13.696928) (1575130445920, 21, 15.352819) (1575130446920, 15, 12.388407) (1575130447920, 27, 11.267529) (1575130448920, 20, 14.103228) (1575130449920, 20, 16.250950) (1575130450920, 30, 16.236088) (1575130451920, 22, 18.305340) (1575130452920, 25, 17.360685) (1575130453920, 25, 14.978681) (1575130454920, 33, 14.096183) (1575130455920, 26, 10.019039) (1575130456920, 19, 19.158213) (1575130457920, 22, 15.593924) (1575130458920, 26, 18.780119) (1575130459920, 21, 16.001656) (1575130460920, 16, 18.458328) (1575130461920, 21, 16.417843) (1575130462920, 28, 11.736558) (1575130463920, 34, 18.143946) (1575130464920, 27, 10.303225) (1575130465920, 20, 19.756748) (1575130466920, 22, 12.940063) (1575130467920, 23, 11.509640) (1575130468920, 19, 18.319309) (1575130469920, 19, 16.278345) (1575130470920, 27, 10.898361) (1575130471920, 31, 13.922162) (1575130472920, 15, 19.296116) (1575130473920, 26, 15.885763) (1575130474920, 15, 15.525804) (1575130475920, 19, 19.579538) (1575130476920, 20, 11.073811) (1575130477920, 16, 13.932510) (1575130478920, 17, 11.900328) (1575130479920, 22, 16.540415) (1575130480920, 33, 15.203803) (1575130481920, 17, 11.518434) (1575130482920, 17, 13.152081) (1575130483920, 18, 11.378041) (1575130484920, 21, 15.390745) (1575130485920, 30, 15.127818) (1575130486920, 19, 16.530401) (1575130487920, 32, 16.542702) (1575130488920, 26, 16.366442) (1575130489920, 25, 10.306822) (1575130490920, 15, 13.691117) (1575130491920, 15, 13.476817) (1575130492920, 25, 12.529998) (1575130493920, 22, 15.550021) (1575130494920, 20, 15.064971) (1575130495920, 24, 13.313683) (1575130496920, 23, 17.002878) (1575130497920, 30, 19.991594) (1575130498920, 15, 11.116746) (1575130499910, 16, 19.405090) (1575130500910, 22, 14.377142) (1575130501910, 16, 16.868231) (1575130502910, 20, 11.565193) (1575130503910, 31, 13.009119) (1575130504910, 29, 18.136400) (1575130505910, 17, 13.806572) (1575130506910, 23, 14.688898) (1575130507910, 26, 12.931019) (1575130508910, 32, 12.185531) (1575130509910, 30, 13.608714) (1575130510910, 23, 18.624914) (1575130511910, 22, 12.970826) (1575130512910, 22, 12.065827) (1575130513910, 25, 16.967192) (1575130514910, 16, 10.283031) (1575130515910, 22, 16.072535) (1575130516910, 24, 10.794536) (1575130517910, 32, 10.591207) (1575130518910, 20, 13.015227) (1575130519910, 28, 15.410999) (1575130520910, 29, 12.785076) (1575130521910, 28, 15.305857) (1575130522910, 33, 12.820810) (1575130523910, 34, 13.618055) (1575130524910, 32, 12.971123) (1575130525910, 24, 10.974546) (1575130526910, 15, 10.742910) (1575130527910, 23, 16.810783) (1575130528910, 18, 13.115224) (1575130529910, 26, 17.418489) (1575130530910, 20, 17.302315) (1575130531910, 21, 14.283571) (1575130532910, 16, 
16.826534) (1575130533910, 18, 19.222122) (1575130534910, 18, 14.931420) (1575130535910, 17, 19.549454) (1575130536910, 22, 16.908388) (1575130537910, 32, 15.637796) (1575130538910, 31, 15.517650) (1575130539910, 18, 14.038033) (1575130540910, 32, 19.859648) (1575130541910, 16, 13.220840) (1575130542910, 28, 16.445398) (1575130543910, 26, 16.695753) (1575130544910, 33, 13.696928) (1575130545910, 21, 15.352819) (1575130546910, 15, 12.388407) (1575130547910, 27, 11.267529) (1575130548910, 20, 14.103228) (1575130549910, 20, 16.250950) (1575130550910, 30, 16.236088) (1575130551910, 22, 18.305340) (1575130552910, 25, 17.360685) (1575130553910, 25, 14.978681) (1575130554910, 33, 14.096183) (1575130555910, 26, 10.019039) (1575130556910, 19, 19.158213) (1575130557910, 22, 15.593924) (1575130558910, 26, 18.780119) (1575130559910, 21, 16.001656) (1575130560910, 16, 18.458328) (1575130561910, 21, 16.417843) (1575130562910, 28, 11.736558) (1575130563910, 34, 18.143946) (1575130564910, 27, 10.303225) (1575130565910, 20, 19.756748) (1575130566910, 22, 12.940063) (1575130567910, 23, 11.509640) (1575130568910, 19, 18.319309) (1575130569910, 19, 16.278345) (1575130570910, 27, 10.898361) (1575130571910, 31, 13.922162) (1575130572910, 15, 19.296116) (1575130573910, 26, 15.885763) (1575130574910, 15, 15.525804) (1575130575910, 19, 19.579538) (1575130576910, 20, 11.073811) (1575130577910, 16, 13.932510) (1575130578910, 17, 11.900328) (1575130579910, 22, 16.540415) (1575130580910, 33, 15.203803) (1575130581910, 17, 11.518434) (1575130582910, 17, 13.152081) (1575130583910, 18, 11.378041) (1575130584910, 21, 15.390745) (1575130585910, 30, 15.127818) (1575130586910, 19, 16.530401) (1575130587910, 32, 16.542702) (1575130588910, 26, 16.366442) (1575130589910, 25, 10.306822) (1575130590910, 15, 13.691117) (1575130591910, 15, 13.476817) (1575130592910, 25, 12.529998) (1575130593910, 22, 15.550021) (1575130594910, 20, 15.064971) (1575130595910, 24, 13.313683) (1575130596910, 23, 17.002878) (1575130597910, 30, 19.991594) (1575130598910, 15, 11.116746) (1575130599900, 16, 19.405090) (1575130600900, 22, 14.377142) (1575130601900, 16, 16.868231) (1575130602900, 20, 11.565193) (1575130603900, 31, 13.009119) (1575130604900, 29, 18.136400) (1575130605900, 17, 13.806572) (1575130606900, 23, 14.688898) (1575130607900, 26, 12.931019) (1575130608900, 32, 12.185531) (1575130609900, 30, 13.608714) (1575130610900, 23, 18.624914) (1575130611900, 22, 12.970826) (1575130612900, 22, 12.065827) (1575130613900, 25, 16.967192) (1575130614900, 16, 10.283031) (1575130615900, 22, 16.072535) (1575130616900, 24, 10.794536) (1575130617900, 32, 10.591207) (1575130618900, 20, 13.015227) (1575130619900, 28, 15.410999) (1575130620900, 29, 12.785076) (1575130621900, 28, 15.305857) (1575130622900, 33, 12.820810) (1575130623900, 34, 13.618055) (1575130624900, 32, 12.971123) (1575130625900, 24, 10.974546) (1575130626900, 15, 10.742910) (1575130627900, 23, 16.810783) (1575130628900, 18, 13.115224) (1575130629900, 26, 17.418489) (1575130630900, 20, 17.302315) (1575130631900, 21, 14.283571) (1575130632900, 16, 16.826534) (1575130633900, 18, 19.222122) (1575130634900, 18, 14.931420) (1575130635900, 17, 19.549454) (1575130636900, 22, 16.908388) (1575130637900, 32, 15.637796) (1575130638900, 31, 15.517650) (1575130639900, 18, 14.038033) (1575130640900, 32, 19.859648) (1575130641900, 16, 13.220840) (1575130642900, 28, 16.445398) (1575130643900, 26, 16.695753) (1575130644900, 33, 13.696928) (1575130645900, 21, 15.352819) (1575130646900, 15, 12.388407) 
(1575130647900, 27, 11.267529) (1575130648900, 20, 14.103228) (1575130649900, 20, 16.250950) (1575130650900, 30, 16.236088) (1575130651900, 22, 18.305340) (1575130652900, 25, 17.360685) (1575130653900, 25, 14.978681) (1575130654900, 33, 14.096183) (1575130655900, 26, 10.019039) (1575130656900, 19, 19.158213) (1575130657900, 22, 15.593924) (1575130658900, 26, 18.780119) (1575130659900, 21, 16.001656) (1575130660900, 16, 18.458328) (1575130661900, 21, 16.417843) (1575130662900, 28, 11.736558) (1575130663900, 34, 18.143946) (1575130664900, 27, 10.303225) (1575130665900, 20, 19.756748) (1575130666900, 22, 12.940063) (1575130667900, 23, 11.509640) (1575130668900, 19, 18.319309) (1575130669900, 19, 16.278345) (1575130670900, 27, 10.898361) (1575130671900, 31, 13.922162) (1575130672900, 15, 19.296116) (1575130673900, 26, 15.885763) (1575130674900, 15, 15.525804) (1575130675900, 19, 19.579538) (1575130676900, 20, 11.073811) (1575130677900, 16, 13.932510) (1575130678900, 17, 11.900328) (1575130679900, 22, 16.540415) (1575130680900, 33, 15.203803) (1575130681900, 17, 11.518434) (1575130682900, 17, 13.152081) (1575130683900, 18, 11.378041) (1575130684900, 21, 15.390745) (1575130685900, 30, 15.127818) (1575130686900, 19, 16.530401) (1575130687900, 32, 16.542702) (1575130688900, 26, 16.366442) (1575130689900, 25, 10.306822) (1575130690900, 15, 13.691117) (1575130691900, 15, 13.476817) (1575130692900, 25, 12.529998) (1575130693900, 22, 15.550021) (1575130694900, 20, 15.064971) (1575130695900, 24, 13.313683) (1575130696900, 23, 17.002878) (1575130697900, 30, 19.991594) (1575130698900, 15, 11.116746) (1575130699890, 16, 19.405090) (1575130700890, 22, 14.377142) (1575130701890, 16, 16.868231) (1575130702890, 20, 11.565193) (1575130703890, 31, 13.009119) (1575130704890, 29, 18.136400) (1575130705890, 17, 13.806572) (1575130706890, 23, 14.688898) (1575130707890, 26, 12.931019) (1575130708890, 32, 12.185531) (1575130709890, 30, 13.608714) (1575130710890, 23, 18.624914) (1575130711890, 22, 12.970826) (1575130712890, 22, 12.065827) (1575130713890, 25, 16.967192) (1575130714890, 16, 10.283031) (1575130715890, 22, 16.072535) (1575130716890, 24, 10.794536) (1575130717890, 32, 10.591207) (1575130718890, 20, 13.015227) (1575130719890, 28, 15.410999) (1575130720890, 29, 12.785076) (1575130721890, 28, 15.305857) (1575130722890, 33, 12.820810) (1575130723890, 34, 13.618055) (1575130724890, 32, 12.971123) (1575130725890, 24, 10.974546) (1575130726890, 15, 10.742910) (1575130727890, 23, 16.810783) (1575130728890, 18, 13.115224) (1575130729890, 26, 17.418489) (1575130730890, 20, 17.302315) (1575130731890, 21, 14.283571) (1575130732890, 16, 16.826534) (1575130733890, 18, 19.222122) (1575130734890, 18, 14.931420) (1575130735890, 17, 19.549454) (1575130736890, 22, 16.908388) (1575130737890, 32, 15.637796) (1575130738890, 31, 15.517650) (1575130739890, 18, 14.038033) (1575130740890, 32, 19.859648) (1575130741890, 16, 13.220840) (1575130742890, 28, 16.445398) (1575130743890, 26, 16.695753) (1575130744890, 33, 13.696928) (1575130745890, 21, 15.352819) (1575130746890, 15, 12.388407) (1575130747890, 27, 11.267529) (1575130748890, 20, 14.103228) (1575130749890, 20, 16.250950) (1575130750890, 30, 16.236088) (1575130751890, 22, 18.305340) (1575130752890, 25, 17.360685) (1575130753890, 25, 14.978681) (1575130754890, 33, 14.096183) (1575130755890, 26, 10.019039) (1575130756890, 19, 19.158213) (1575130757890, 22, 15.593924) (1575130758890, 26, 18.780119) (1575130759890, 21, 16.001656) (1575130760890, 16, 18.458328) (1575130761890, 21, 
16.417843) (1575130762890, 28, 11.736558) (1575130763890, 34, 18.143946) (1575130764890, 27, 10.303225) (1575130765890, 20, 19.756748) (1575130766890, 22, 12.940063) (1575130767890, 23, 11.509640) (1575130768890, 19, 18.319309) (1575130769890, 19, 16.278345) (1575130770890, 27, 10.898361) (1575130771890, 31, 13.922162) (1575130772890, 15, 19.296116) (1575130773890, 26, 15.885763) (1575130774890, 15, 15.525804) (1575130775890, 19, 19.579538) (1575130776890, 20, 11.073811) (1575130777890, 16, 13.932510) (1575130778890, 17, 11.900328) (1575130779890, 22, 16.540415) (1575130780890, 33, 15.203803) (1575130781890, 17, 11.518434) (1575130782890, 17, 13.152081) (1575130783890, 18, 11.378041) (1575130784890, 21, 15.390745) (1575130785890, 30, 15.127818) (1575130786890, 19, 16.530401) (1575130787890, 32, 16.542702) (1575130788890, 26, 16.366442) (1575130789890, 25, 10.306822) (1575130790890, 15, 13.691117) (1575130791890, 15, 13.476817) (1575130792890, 25, 12.529998) (1575130793890, 22, 15.550021) (1575130794890, 20, 15.064971) (1575130795890, 24, 13.313683) (1575130796890, 23, 17.002878) (1575130797890, 30, 19.991594) (1575130798890, 15, 11.116746) (1575130799880, 16, 19.405090) (1575130800880, 22, 14.377142) (1575130801880, 16, 16.868231) (1575130802880, 20, 11.565193) (1575130803880, 31, 13.009119) (1575130804880, 29, 18.136400) (1575130805880, 17, 13.806572) (1575130806880, 23, 14.688898) (1575130807880, 26, 12.931019) (1575130808880, 32, 12.185531) (1575130809880, 30, 13.608714) (1575130810880, 23, 18.624914) (1575130811880, 22, 12.970826) (1575130812880, 22, 12.065827) (1575130813880, 25, 16.967192) (1575130814880, 16, 10.283031) (1575130815880, 22, 16.072535) (1575130816880, 24, 10.794536) (1575130817880, 32, 10.591207) (1575130818880, 20, 13.015227) (1575130819880, 28, 15.410999) (1575130820880, 29, 12.785076) (1575130821880, 28, 15.305857) (1575130822880, 33, 12.820810) (1575130823880, 34, 13.618055) (1575130824880, 32, 12.971123) (1575130825880, 24, 10.974546) (1575130826880, 15, 10.742910) (1575130827880, 23, 16.810783) (1575130828880, 18, 13.115224) (1575130829880, 26, 17.418489) (1575130830880, 20, 17.302315) (1575130831880, 21, 14.283571) (1575130832880, 16, 16.826534) (1575130833880, 18, 19.222122) (1575130834880, 18, 14.931420) (1575130835880, 17, 19.549454) (1575130836880, 22, 16.908388) (1575130837880, 32, 15.637796) (1575130838880, 31, 15.517650) (1575130839880, 18, 14.038033) (1575130840880, 32, 19.859648) (1575130841880, 16, 13.220840) (1575130842880, 28, 16.445398) (1575130843880, 26, 16.695753) (1575130844880, 33, 13.696928) (1575130845880, 21, 15.352819) (1575130846880, 15, 12.388407) (1575130847880, 27, 11.267529) (1575130848880, 20, 14.103228) (1575130849880, 20, 16.250950) (1575130850880, 30, 16.236088) (1575130851880, 22, 18.305340) (1575130852880, 25, 17.360685) (1575130853880, 25, 14.978681) (1575130854880, 33, 14.096183) (1575130855880, 26, 10.019039) (1575130856880, 19, 19.158213) (1575130857880, 22, 15.593924) (1575130858880, 26, 18.780119) (1575130859880, 21, 16.001656) (1575130860880, 16, 18.458328) (1575130861880, 21, 16.417843) (1575130862880, 28, 11.736558) (1575130863880, 34, 18.143946) (1575130864880, 27, 10.303225) (1575130865880, 20, 19.756748) (1575130866880, 22, 12.940063) (1575130867880, 23, 11.509640) (1575130868880, 19, 18.319309) (1575130869880, 19, 16.278345) (1575130870880, 27, 10.898361) (1575130871880, 31, 13.922162) (1575130872880, 15, 19.296116) (1575130873880, 26, 15.885763) (1575130874880, 15, 15.525804) (1575130875880, 19, 19.579538) 
(1575130876880, 20, 11.073811) (1575130877880, 16, 13.932510) (1575130878880, 17, 11.900328) (1575130879880, 22, 16.540415) (1575130880880, 33, 15.203803) (1575130881880, 17, 11.518434) (1575130882880, 17, 13.152081) (1575130883880, 18, 11.378041) (1575130884880, 21, 15.390745) (1575130885880, 30, 15.127818) (1575130886880, 19, 16.530401) (1575130887880, 32, 16.542702) (1575130888880, 26, 16.366442) (1575130889880, 25, 10.306822) (1575130890880, 15, 13.691117) (1575130891880, 15, 13.476817) (1575130892880, 25, 12.529998) (1575130893880, 22, 15.550021) (1575130894880, 20, 15.064971) (1575130895880, 24, 13.313683) (1575130896880, 23, 17.002878) (1575130897880, 30, 19.991594) (1575130898880, 15, 11.116746) (1575130899870, 16, 19.405090) (1575130900870, 22, 14.377142) (1575130901870, 16, 16.868231) (1575130902870, 20, 11.565193) (1575130903870, 31, 13.009119) (1575130904870, 29, 18.136400) (1575130905870, 17, 13.806572) (1575130906870, 23, 14.688898) (1575130907870, 26, 12.931019) (1575130908870, 32, 12.185531) (1575130909870, 30, 13.608714) (1575130910870, 23, 18.624914) (1575130911870, 22, 12.970826) (1575130912870, 22, 12.065827) (1575130913870, 25, 16.967192) (1575130914870, 16, 10.283031) (1575130915870, 22, 16.072535) (1575130916870, 24, 10.794536) (1575130917870, 32, 10.591207) (1575130918870, 20, 13.015227) (1575130919870, 28, 15.410999) (1575130920870, 29, 12.785076) (1575130921870, 28, 15.305857) (1575130922870, 33, 12.820810) (1575130923870, 34, 13.618055) (1575130924870, 32, 12.971123) (1575130925870, 24, 10.974546) (1575130926870, 15, 10.742910) (1575130927870, 23, 16.810783) (1575130928870, 18, 13.115224) (1575130929870, 26, 17.418489) (1575130930870, 20, 17.302315) (1575130931870, 21, 14.283571) (1575130932870, 16, 16.826534) (1575130933870, 18, 19.222122) (1575130934870, 18, 14.931420) (1575130935870, 17, 19.549454) (1575130936870, 22, 16.908388) (1575130937870, 32, 15.637796) (1575130938870, 31, 15.517650) (1575130939870, 18, 14.038033) (1575130940870, 32, 19.859648) (1575130941870, 16, 13.220840) (1575130942870, 28, 16.445398) (1575130943870, 26, 16.695753) (1575130944870, 33, 13.696928) (1575130945870, 21, 15.352819) (1575130946870, 15, 12.388407) (1575130947870, 27, 11.267529) (1575130948870, 20, 14.103228) (1575130949870, 20, 16.250950) (1575130950870, 30, 16.236088) (1575130951870, 22, 18.305340) (1575130952870, 25, 17.360685) (1575130953870, 25, 14.978681) (1575130954870, 33, 14.096183) (1575130955870, 26, 10.019039) (1575130956870, 19, 19.158213) (1575130957870, 22, 15.593924) (1575130958870, 26, 18.780119) (1575130959870, 21, 16.001656) (1575130960870, 16, 18.458328) (1575130961870, 21, 16.417843) (1575130962870, 28, 11.736558) (1575130963870, 34, 18.143946) (1575130964870, 27, 10.303225) (1575130965870, 20, 19.756748) (1575130966870, 22, 12.940063) (1575130967870, 23, 11.509640) (1575130968870, 19, 18.319309) (1575130969870, 19, 16.278345) (1575130970870, 27, 10.898361) (1575130971870, 31, 13.922162) (1575130972870, 15, 19.296116) (1575130973870, 26, 15.885763) (1575130974870, 15, 15.525804) (1575130975870, 19, 19.579538) (1575130976870, 20, 11.073811) (1575130977870, 16, 13.932510) (1575130978870, 17, 11.900328) (1575130979870, 22, 16.540415) (1575130980870, 33, 15.203803) (1575130981870, 17, 11.518434) (1575130982870, 17, 13.152081) (1575130983870, 18, 11.378041) (1575130984870, 21, 15.390745) (1575130985870, 30, 15.127818) (1575130986870, 19, 16.530401) (1575130987870, 32, 16.542702) (1575130988870, 26, 16.366442) (1575130989870, 25, 10.306822) (1575130990870, 15, 
13.691117) (1575130991870, 15, 13.476817) (1575130992870, 25, 12.529998) (1575130993870, 22, 15.550021) (1575130994870, 20, 15.064971) (1575130995870, 24, 13.313683) (1575130996870, 23, 17.002878) (1575130997870, 30, 19.991594) (1575130998870, 15, 11.116746) (1575130999860, 16, 19.405090) (1575131000860, 22, 14.377142) (1575131001860, 16, 16.868231) (1575131002860, 20, 11.565193) (1575131003860, 31, 13.009119) (1575131004860, 29, 18.136400) (1575131005860, 17, 13.806572) (1575131006860, 23, 14.688898) (1575131007860, 26, 12.931019) (1575131008860, 32, 12.185531) (1575131009860, 30, 13.608714) (1575131010860, 23, 18.624914) (1575131011860, 22, 12.970826) (1575131012860, 22, 12.065827) (1575131013860, 25, 16.967192) (1575131014860, 16, 10.283031) (1575131015860, 22, 16.072535) (1575131016860, 24, 10.794536) (1575131017860, 32, 10.591207) (1575131018860, 20, 13.015227) (1575131019860, 28, 15.410999) (1575131020860, 29, 12.785076) (1575131021860, 28, 15.305857) (1575131022860, 33, 12.820810) (1575131023860, 34, 13.618055) (1575131024860, 32, 12.971123) (1575131025860, 24, 10.974546) (1575131026860, 15, 10.742910) (1575131027860, 23, 16.810783) (1575131028860, 18, 13.115224) (1575131029860, 26, 17.418489) (1575131030860, 20, 17.302315) (1575131031860, 21, 14.283571) (1575131032860, 16, 16.826534) (1575131033860, 18, 19.222122) (1575131034860, 18, 14.931420) (1575131035860, 17, 19.549454) (1575131036860, 22, 16.908388) (1575131037860, 32, 15.637796) (1575131038860, 31, 15.517650) (1575131039860, 18, 14.038033) (1575131040860, 32, 19.859648) (1575131041860, 16, 13.220840) (1575131042860, 28, 16.445398) (1575131043860, 26, 16.695753) (1575131044860, 33, 13.696928) (1575131045860, 21, 15.352819) (1575131046860, 15, 12.388407) (1575131047860, 27, 11.267529) (1575131048860, 20, 14.103228) (1575131049860, 20, 16.250950) (1575131050860, 30, 16.236088) (1575131051860, 22, 18.305340) (1575131052860, 25, 17.360685) (1575131053860, 25, 14.978681) (1575131054860, 33, 14.096183) (1575131055860, 26, 10.019039) (1575131056860, 19, 19.158213) (1575131057860, 22, 15.593924) (1575131058860, 26, 18.780119) (1575131059860, 21, 16.001656) (1575131060860, 16, 18.458328) (1575131061860, 21, 16.417843) (1575131062860, 28, 11.736558) (1575131063860, 34, 18.143946) (1575131064860, 27, 10.303225) (1575131065860, 20, 19.756748) (1575131066860, 22, 12.940063) (1575131067860, 23, 11.509640) (1575131068860, 19, 18.319309) (1575131069860, 19, 16.278345) (1575131070860, 27, 10.898361) (1575131071860, 31, 13.922162) (1575131072860, 15, 19.296116) (1575131073860, 26, 15.885763) (1575131074860, 15, 15.525804) (1575131075860, 19, 19.579538) (1575131076860, 20, 11.073811) (1575131077860, 16, 13.932510) (1575131078860, 17, 11.900328) (1575131079860, 22, 16.540415) (1575131080860, 33, 15.203803) (1575131081860, 17, 11.518434) (1575131082860, 17, 13.152081) (1575131083860, 18, 11.378041) (1575131084860, 21, 15.390745) (1575131085860, 30, 15.127818) (1575131086860, 19, 16.530401) (1575131087860, 32, 16.542702) (1575131088860, 26, 16.366442) (1575131089860, 25, 10.306822) (1575131090860, 15, 13.691117) (1575131091860, 15, 13.476817) (1575131092860, 25, 12.529998) (1575131093860, 22, 15.550021) (1575131094860, 20, 15.064971) (1575131095860, 24, 13.313683) (1575131096860, 23, 17.002878) (1575131097860, 30, 19.991594) (1575131098860, 15, 11.116746) (1575131099850, 16, 19.405090) (1575131100850, 22, 14.377142) (1575131101850, 16, 16.868231) (1575131102850, 20, 11.565193) (1575131103850, 31, 13.009119) (1575131104850, 29, 18.136400) 
(1575131105850, 17, 13.806572) (1575131106850, 23, 14.688898) (1575131107850, 26, 12.931019) (1575131108850, 32, 12.185531) (1575131109850, 30, 13.608714) (1575131110850, 23, 18.624914) (1575131111850, 22, 12.970826) (1575131112850, 22, 12.065827) (1575131113850, 25, 16.967192) (1575131114850, 16, 10.283031) (1575131115850, 22, 16.072535) (1575131116850, 24, 10.794536) (1575131117850, 32, 10.591207) (1575131118850, 20, 13.015227) (1575131119850, 28, 15.410999) (1575131120850, 29, 12.785076) (1575131121850, 28, 15.305857) (1575131122850, 33, 12.820810) (1575131123850, 34, 13.618055) (1575131124850, 32, 12.971123) (1575131125850, 24, 10.974546) (1575131126850, 15, 10.742910) (1575131127850, 23, 16.810783) (1575131128850, 18, 13.115224) (1575131129850, 26, 17.418489) (1575131130850, 20, 17.302315) (1575131131850, 21, 14.283571) (1575131132850, 16, 16.826534) (1575131133850, 18, 19.222122) (1575131134850, 18, 14.931420) (1575131135850, 17, 19.549454) (1575131136850, 22, 16.908388) (1575131137850, 32, 15.637796) (1575131138850, 31, 15.517650) (1575131139850, 18, 14.038033) (1575131140850, 32, 19.859648) (1575131141850, 16, 13.220840) (1575131142850, 28, 16.445398) (1575131143850, 26, 16.695753) (1575131144850, 33, 13.696928) (1575131145850, 21, 15.352819) (1575131146850, 15, 12.388407) (1575131147850, 27, 11.267529) (1575131148850, 20, 14.103228) (1575131149850, 20, 16.250950) (1575131150850, 30, 16.236088) (1575131151850, 22, 18.305340) (1575131152850, 25, 17.360685) (1575131153850, 25, 14.978681) (1575131154850, 33, 14.096183) (1575131155850, 26, 10.019039) (1575131156850, 19, 19.158213) (1575131157850, 22, 15.593924) (1575131158850, 26, 18.780119) (1575131159850, 21, 16.001656) (1575131160850, 16, 18.458328) (1575131161850, 21, 16.417843) (1575131162850, 28, 11.736558) (1575131163850, 34, 18.143946) (1575131164850, 27, 10.303225) (1575131165850, 20, 19.756748) (1575131166850, 22, 12.940063) (1575131167850, 23, 11.509640) (1575131168850, 19, 18.319309) (1575131169850, 19, 16.278345) (1575131170850, 27, 10.898361) (1575131171850, 31, 13.922162) (1575131172850, 15, 19.296116) (1575131173850, 26, 15.885763) (1575131174850, 15, 15.525804) (1575131175850, 19, 19.579538) (1575131176850, 20, 11.073811) (1575131177850, 16, 13.932510) (1575131178850, 17, 11.900328) (1575131179850, 22, 16.540415) (1575131180850, 33, 15.203803) (1575131181850, 17, 11.518434) (1575131182850, 17, 13.152081) (1575131183850, 18, 11.378041) (1575131184850, 21, 15.390745) (1575131185850, 30, 15.127818) (1575131186850, 19, 16.530401) (1575131187850, 32, 16.542702) (1575131188850, 26, 16.366442) (1575131189850, 25, 10.306822) (1575131190850, 15, 13.691117) (1575131191850, 15, 13.476817) (1575131192850, 25, 12.529998) (1575131193850, 22, 15.550021) (1575131194850, 20, 15.064971) (1575131195850, 24, 13.313683) (1575131196850, 23, 17.002878) (1575131197850, 30, 19.991594) (1575131198850, 15, 11.116746) (1575131199840, 16, 19.405090) (1575131200840, 22, 14.377142) (1575131201840, 16, 16.868231) (1575131202840, 20, 11.565193) (1575131203840, 31, 13.009119) (1575131204840, 29, 18.136400) (1575131205840, 17, 13.806572) (1575131206840, 23, 14.688898) (1575131207840, 26, 12.931019) (1575131208840, 32, 12.185531) (1575131209840, 30, 13.608714) (1575131210840, 23, 18.624914) (1575131211840, 22, 12.970826) (1575131212840, 22, 12.065827) (1575131213840, 25, 16.967192) (1575131214840, 16, 10.283031) (1575131215840, 22, 16.072535) (1575131216840, 24, 10.794536) (1575131217840, 32, 10.591207) (1575131218840, 20, 13.015227) (1575131219840, 28, 
15.410999) (1575131220840, 29, 12.785076) (1575131221840, 28, 15.305857) (1575131222840, 33, 12.820810) (1575131223840, 34, 13.618055) (1575131224840, 32, 12.971123) (1575131225840, 24, 10.974546) (1575131226840, 15, 10.742910) (1575131227840, 23, 16.810783) (1575131228840, 18, 13.115224) (1575131229840, 26, 17.418489) (1575131230840, 20, 17.302315) (1575131231840, 21, 14.283571) (1575131232840, 16, 16.826534) (1575131233840, 18, 19.222122) (1575131234840, 18, 14.931420) (1575131235840, 17, 19.549454) (1575131236840, 22, 16.908388) (1575131237840, 32, 15.637796) (1575131238840, 31, 15.517650) (1575131239840, 18, 14.038033) (1575131240840, 32, 19.859648) (1575131241840, 16, 13.220840) (1575131242840, 28, 16.445398) (1575131243840, 26, 16.695753) (1575131244840, 33, 13.696928) (1575131245840, 21, 15.352819) (1575131246840, 15, 12.388407) (1575131247840, 27, 11.267529) (1575131248840, 20, 14.103228) (1575131249840, 20, 16.250950) (1575131250840, 30, 16.236088) (1575131251840, 22, 18.305340) (1575131252840, 25, 17.360685) (1575131253840, 25, 14.978681) (1575131254840, 33, 14.096183) (1575131255840, 26, 10.019039) (1575131256840, 19, 19.158213) (1575131257840, 22, 15.593924) (1575131258840, 26, 18.780119) (1575131259840, 21, 16.001656) (1575131260840, 16, 18.458328) (1575131261840, 21, 16.417843) (1575131262840, 28, 11.736558) (1575131263840, 34, 18.143946) (1575131264840, 27, 10.303225) (1575131265840, 20, 19.756748) (1575131266840, 22, 12.940063) (1575131267840, 23, 11.509640) (1575131268840, 19, 18.319309) (1575131269840, 19, 16.278345) (1575131270840, 27, 10.898361) (1575131271840, 31, 13.922162) (1575131272840, 15, 19.296116) (1575131273840, 26, 15.885763) (1575131274840, 15, 15.525804) (1575131275840, 19, 19.579538) (1575131276840, 20, 11.073811) (1575131277840, 16, 13.932510) (1575131278840, 17, 11.900328) (1575131279840, 22, 16.540415) (1575131280840, 33, 15.203803) (1575131281840, 17, 11.518434) (1575131282840, 17, 13.152081) (1575131283840, 18, 11.378041) (1575131284840, 21, 15.390745) (1575131285840, 30, 15.127818) (1575131286840, 19, 16.530401) (1575131287840, 32, 16.542702) (1575131288840, 26, 16.366442) (1575131289840, 25, 10.306822) (1575131290840, 15, 13.691117) (1575131291840, 15, 13.476817) (1575131292840, 25, 12.529998) (1575131293840, 22, 15.550021) (1575131294840, 20, 15.064971) (1575131295840, 24, 13.313683) (1575131296840, 23, 17.002878) (1575131297840, 30, 19.991594) (1575131298840, 15, 11.116746) (1575131299830, 16, 19.405090) (1575131300830, 22, 14.377142) (1575131301830, 16, 16.868231) (1575131302830, 20, 11.565193) (1575131303830, 31, 13.009119) (1575131304830, 29, 18.136400) (1575131305830, 17, 13.806572) (1575131306830, 23, 14.688898) (1575131307830, 26, 12.931019) (1575131308830, 32, 12.185531) (1575131309830, 30, 13.608714) (1575131310830, 23, 18.624914) (1575131311830, 22, 12.970826) (1575131312830, 22, 12.065827) (1575131313830, 25, 16.967192) (1575131314830, 16, 10.283031) (1575131315830, 22, 16.072535) (1575131316830, 24, 10.794536) (1575131317830, 32, 10.591207) (1575131318830, 20, 13.015227) (1575131319830, 28, 15.410999) (1575131320830, 29, 12.785076) (1575131321830, 28, 15.305857) (1575131322830, 33, 12.820810) (1575131323830, 34, 13.618055) (1575131324830, 32, 12.971123) (1575131325830, 24, 10.974546) (1575131326830, 15, 10.742910) (1575131327830, 23, 16.810783) (1575131328830, 18, 13.115224) (1575131329830, 26, 17.418489) (1575131330830, 20, 17.302315) (1575131331830, 21, 14.283571) (1575131332830, 16, 16.826534) (1575131333830, 18, 19.222122) 
(1575131334830, 18, 14.931420) (1575131335830, 17, 19.549454) (1575131336830, 22, 16.908388) (1575131337830, 32, 15.637796) (1575131338830, 31, 15.517650) (1575131339830, 18, 14.038033) (1575131340830, 32, 19.859648) (1575131341830, 16, 13.220840) (1575131342830, 28, 16.445398) (1575131343830, 26, 16.695753) (1575131344830, 33, 13.696928) (1575131345830, 21, 15.352819) (1575131346830, 15, 12.388407) (1575131347830, 27, 11.267529) (1575131348830, 20, 14.103228) (1575131349830, 20, 16.250950) (1575131350830, 30, 16.236088) (1575131351830, 22, 18.305340) (1575131352830, 25, 17.360685) (1575131353830, 25, 14.978681) (1575131354830, 33, 14.096183) (1575131355830, 26, 10.019039) (1575131356830, 19, 19.158213) (1575131357830, 22, 15.593924) (1575131358830, 26, 18.780119) (1575131359830, 21, 16.001656) (1575131360830, 16, 18.458328) (1575131361830, 21, 16.417843) (1575131362830, 28, 11.736558) (1575131363830, 34, 18.143946) (1575131364830, 27, 10.303225) (1575131365830, 20, 19.756748) (1575131366830, 22, 12.940063) (1575131367830, 23, 11.509640) (1575131368830, 19, 18.319309) (1575131369830, 19, 16.278345) (1575131370830, 27, 10.898361) (1575131371830, 31, 13.922162) (1575131372830, 15, 19.296116) (1575131373830, 26, 15.885763) (1575131374830, 15, 15.525804) (1575131375830, 19, 19.579538) (1575131376830, 20, 11.073811) (1575131377830, 16, 13.932510) (1575131378830, 17, 11.900328) (1575131379830, 22, 16.540415) (1575131380830, 33, 15.203803) (1575131381830, 17, 11.518434) (1575131382830, 17, 13.152081) (1575131383830, 18, 11.378041) (1575131384830, 21, 15.390745) (1575131385830, 30, 15.127818) (1575131386830, 19, 16.530401) (1575131387830, 32, 16.542702) (1575131388830, 26, 16.366442) (1575131389830, 25, 10.306822) (1575131390830, 15, 13.691117) (1575131391830, 15, 13.476817) (1575131392830, 25, 12.529998) (1575131393830, 22, 15.550021) (1575131394830, 20, 15.064971) (1575131395830, 24, 13.313683) (1575131396830, 23, 17.002878) (1575131397830, 30, 19.991594) (1575131398830, 15, 11.116746) (1575131399820, 16, 19.405090) (1575131400820, 22, 14.377142) (1575131401820, 16, 16.868231) (1575131402820, 20, 11.565193) (1575131403820, 31, 13.009119) (1575131404820, 29, 18.136400) (1575131405820, 17, 13.806572) (1575131406820, 23, 14.688898) (1575131407820, 26, 12.931019) (1575131408820, 32, 12.185531) (1575131409820, 30, 13.608714) (1575131410820, 23, 18.624914) (1575131411820, 22, 12.970826) (1575131412820, 22, 12.065827) (1575131413820, 25, 16.967192) (1575131414820, 16, 10.283031) (1575131415820, 22, 16.072535) (1575131416820, 24, 10.794536) (1575131417820, 32, 10.591207) (1575131418820, 20, 13.015227) (1575131419820, 28, 15.410999) (1575131420820, 29, 12.785076) (1575131421820, 28, 15.305857) (1575131422820, 33, 12.820810) (1575131423820, 34, 13.618055) (1575131424820, 32, 12.971123) (1575131425820, 24, 10.974546) (1575131426820, 15, 10.742910) (1575131427820, 23, 16.810783) (1575131428820, 18, 13.115224) (1575131429820, 26, 17.418489) (1575131430820, 20, 17.302315) (1575131431820, 21, 14.283571) (1575131432820, 16, 16.826534) (1575131433820, 18, 19.222122) (1575131434820, 18, 14.931420) (1575131435820, 17, 19.549454) (1575131436820, 22, 16.908388) (1575131437820, 32, 15.637796) (1575131438820, 31, 15.517650) (1575131439820, 18, 14.038033) (1575131440820, 32, 19.859648) (1575131441820, 16, 13.220840) (1575131442820, 28, 16.445398) (1575131443820, 26, 16.695753) (1575131444820, 33, 13.696928) (1575131445820, 21, 15.352819) (1575131446820, 15, 12.388407) (1575131447820, 27, 11.267529) (1575131448820, 20, 
14.103228) (1575131449820, 20, 16.250950) (1575131450820, 30, 16.236088) (1575131451820, 22, 18.305340) (1575131452820, 25, 17.360685) (1575131453820, 25, 14.978681) (1575131454820, 33, 14.096183) (1575131455820, 26, 10.019039) (1575131456820, 19, 19.158213) (1575131457820, 22, 15.593924) (1575131458820, 26, 18.780119) (1575131459820, 21, 16.001656) (1575131460820, 16, 18.458328) (1575131461820, 21, 16.417843) (1575131462820, 28, 11.736558) (1575131463820, 34, 18.143946) (1575131464820, 27, 10.303225) (1575131465820, 20, 19.756748) (1575131466820, 22, 12.940063) (1575131467820, 23, 11.509640) (1575131468820, 19, 18.319309) (1575131469820, 19, 16.278345) (1575131470820, 27, 10.898361) (1575131471820, 31, 13.922162) (1575131472820, 15, 19.296116) (1575131473820, 26, 15.885763) (1575131474820, 15, 15.525804) (1575131475820, 19, 19.579538) (1575131476820, 20, 11.073811) (1575131477820, 16, 13.932510) (1575131478820, 17, 11.900328) (1575131479820, 22, 16.540415) (1575131480820, 33, 15.203803) (1575131481820, 17, 11.518434) (1575131482820, 17, 13.152081) (1575131483820, 18, 11.378041) (1575131484820, 21, 15.390745) (1575131485820, 30, 15.127818) (1575131486820, 19, 16.530401) (1575131487820, 32, 16.542702) (1575131488820, 26, 16.366442) (1575131489820, 25, 10.306822) (1575131490820, 15, 13.691117) (1575131491820, 15, 13.476817) (1575131492820, 25, 12.529998) (1575131493820, 22, 15.550021) (1575131494820, 20, 15.064971) (1575131495820, 24, 13.313683) (1575131496820, 23, 17.002878) (1575131497820, 30, 19.991594) (1575131498820, 15, 11.116746) (1575131499810, 16, 19.405090) (1575131500810, 22, 14.377142) (1575131501810, 16, 16.868231) (1575131502810, 20, 11.565193) (1575131503810, 31, 13.009119) (1575131504810, 29, 18.136400) (1575131505810, 17, 13.806572) (1575131506810, 23, 14.688898) (1575131507810, 26, 12.931019) (1575131508810, 32, 12.185531) (1575131509810, 30, 13.608714) (1575131510810, 23, 18.624914) (1575131511810, 22, 12.970826) (1575131512810, 22, 12.065827) (1575131513810, 25, 16.967192) (1575131514810, 16, 10.283031) (1575131515810, 22, 16.072535) (1575131516810, 24, 10.794536) (1575131517810, 32, 10.591207) (1575131518810, 20, 13.015227) (1575131519810, 28, 15.410999) (1575131520810, 29, 12.785076) (1575131521810, 28, 15.305857) (1575131522810, 33, 12.820810) (1575131523810, 34, 13.618055) (1575131524810, 32, 12.971123) (1575131525810, 24, 10.974546) (1575131526810, 15, 10.742910) (1575131527810, 23, 16.810783) (1575131528810, 18, 13.115224) (1575131529810, 26, 17.418489) (1575131530810, 20, 17.302315) (1575131531810, 21, 14.283571) (1575131532810, 16, 16.826534) (1575131533810, 18, 19.222122) (1575131534810, 18, 14.931420) (1575131535810, 17, 19.549454) (1575131536810, 22, 16.908388) (1575131537810, 32, 15.637796) (1575131538810, 31, 15.517650) (1575131539810, 18, 14.038033) (1575131540810, 32, 19.859648) (1575131541810, 16, 13.220840) (1575131542810, 28, 16.445398) (1575131543810, 26, 16.695753) (1575131544810, 33, 13.696928) (1575131545810, 21, 15.352819) (1575131546810, 15, 12.388407) (1575131547810, 27, 11.267529) (1575131548810, 20, 14.103228) (1575131549810, 20, 16.250950) (1575131550810, 30, 16.236088) (1575131551810, 22, 18.305340) (1575131552810, 25, 17.360685) (1575131553810, 25, 14.978681) (1575131554810, 33, 14.096183) (1575131555810, 26, 10.019039) (1575131556810, 19, 19.158213) (1575131557810, 22, 15.593924) (1575131558810, 26, 18.780119) (1575131559810, 21, 16.001656) (1575131560810, 16, 18.458328) (1575131561810, 21, 16.417843) (1575131562810, 28, 11.736558) 
(1575131563810, 34, 18.143946) (1575131564810, 27, 10.303225) (1575131565810, 20, 19.756748) (1575131566810, 22, 12.940063) (1575131567810, 23, 11.509640) (1575131568810, 19, 18.319309) (1575131569810, 19, 16.278345) (1575131570810, 27, 10.898361) (1575131571810, 31, 13.922162) (1575131572810, 15, 19.296116) (1575131573810, 26, 15.885763) (1575131574810, 15, 15.525804) (1575131575810, 19, 19.579538) (1575131576810, 20, 11.073811) (1575131577810, 16, 13.932510) (1575131578810, 17, 11.900328) (1575131579810, 22, 16.540415) (1575131580810, 33, 15.203803) (1575131581810, 17, 11.518434) (1575131582810, 17, 13.152081) (1575131583810, 18, 11.378041) (1575131584810, 21, 15.390745) (1575131585810, 30, 15.127818) (1575131586810, 19, 16.530401) (1575131587810, 32, 16.542702) (1575131588810, 26, 16.366442) (1575131589810, 25, 10.306822) (1575131590810, 15, 13.691117) (1575131591810, 15, 13.476817) (1575131592810, 25, 12.529998) (1575131593810, 22, 15.550021) (1575131594810, 20, 15.064971) (1575131595810, 24, 13.313683) (1575131596810, 23, 17.002878) (1575131597810, 30, 19.991594) (1575131598810, 15, 11.116746) (1575131599800, 16, 19.405090) (1575131600800, 22, 14.377142) (1575131601800, 16, 16.868231) (1575131602800, 20, 11.565193) (1575131603800, 31, 13.009119) (1575131604800, 29, 18.136400) (1575131605800, 17, 13.806572) (1575131606800, 23, 14.688898) (1575131607800, 26, 12.931019) (1575131608800, 32, 12.185531) (1575131609800, 30, 13.608714) (1575131610800, 23, 18.624914) (1575131611800, 22, 12.970826) (1575131612800, 22, 12.065827) (1575131613800, 25, 16.967192) (1575131614800, 16, 10.283031) (1575131615800, 22, 16.072535) (1575131616800, 24, 10.794536) (1575131617800, 32, 10.591207) (1575131618800, 20, 13.015227) (1575131619800, 28, 15.410999) (1575131620800, 29, 12.785076) (1575131621800, 28, 15.305857) (1575131622800, 33, 12.820810) (1575131623800, 34, 13.618055) (1575131624800, 32, 12.971123) (1575131625800, 24, 10.974546) (1575131626800, 15, 10.742910) (1575131627800, 23, 16.810783) (1575131628800, 18, 13.115224) (1575131629800, 26, 17.418489) (1575131630800, 20, 17.302315) (1575131631800, 21, 14.283571) (1575131632800, 16, 16.826534) (1575131633800, 18, 19.222122) (1575131634800, 18, 14.931420) (1575131635800, 17, 19.549454) (1575131636800, 22, 16.908388) (1575131637800, 32, 15.637796) (1575131638800, 31, 15.517650) (1575131639800, 18, 14.038033) (1575131640800, 32, 19.859648) (1575131641800, 16, 13.220840) (1575131642800, 28, 16.445398) (1575131643800, 26, 16.695753) (1575131644800, 33, 13.696928) (1575131645800, 21, 15.352819) (1575131646800, 15, 12.388407) (1575131647800, 27, 11.267529) (1575131648800, 20, 14.103228) (1575131649800, 20, 16.250950) (1575131650800, 30, 16.236088) (1575131651800, 22, 18.305340) (1575131652800, 25, 17.360685) (1575131653800, 25, 14.978681) (1575131654800, 33, 14.096183) (1575131655800, 26, 10.019039) (1575131656800, 19, 19.158213) (1575131657800, 22, 15.593924) (1575131658800, 26, 18.780119) (1575131659800, 21, 16.001656) (1575131660800, 16, 18.458328) (1575131661800, 21, 16.417843) (1575131662800, 28, 11.736558) (1575131663800, 34, 18.143946) (1575131664800, 27, 10.303225) (1575131665800, 20, 19.756748) (1575131666800, 22, 12.940063) (1575131667800, 23, 11.509640) (1575131668800, 19, 18.319309) (1575131669800, 19, 16.278345) (1575131670800, 27, 10.898361) (1575131671800, 31, 13.922162) (1575131672800, 15, 19.296116) (1575131673800, 26, 15.885763) (1575131674800, 15, 15.525804) (1575131675800, 19, 19.579538) (1575131676800, 20, 11.073811) (1575131677800, 16, 
13.932510) (1575131678800, 17, 11.900328) (1575131679800, 22, 16.540415) (1575131680800, 33, 15.203803) (1575131681800, 17, 11.518434) (1575131682800, 17, 13.152081) (1575131683800, 18, 11.378041) (1575131684800, 21, 15.390745) (1575131685800, 30, 15.127818) (1575131686800, 19, 16.530401) (1575131687800, 32, 16.542702) (1575131688800, 26, 16.366442) (1575131689800, 25, 10.306822) (1575131690800, 15, 13.691117) (1575131691800, 15, 13.476817) (1575131692800, 25, 12.529998) (1575131693800, 22, 15.550021)")
+ + end_time = int(round(time.time() * 1000)) + tdLog.info("Execute time with compress: %dms" % (end_time - start_time)) + + simLogPath = tdDnodes.getSimLogPath() + grepCmd = "grep -a 'compress rpc msg, before:' -r %s | head -2" % simLogPath + output = subprocess.check_output(grepCmd, shell=True).decode("utf-8") + + if output != "": + tdLog.info("Found %s in the log file." % output) + tdLog.success("%s successfully executed! Compress works as expected." % __file__) + else: + tdLog.exit("%s failed! Compress does NOT work." % __file__) + + conn.close()
diff --git a/tests/pytest/testNoCompress.py b/tests/pytest/testNoCompress.py new file mode 100644 index 0000000000..e3b40b4426 --- /dev/null +++ b/tests/pytest/testNoCompress.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/linux/python2/
+ +# -*- coding: utf-8 -*- +import sys +import getopt +import subprocess +from distutils.log import warn as printf + +from util.log import * +from util.dnodes import * +from util.cases import * +from util.sql import * + +import taos + + +if __name__ == "__main__": + fileName = "all" + deployPath = "" + masterIp = "" + testCluster = False + valgrind = 0 + logSql = True + stop = 0 + opts, args = getopt.gnu_getopt(sys.argv[1:], 'l:sgh', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help']) + for key, value in opts: + if key in ['-h', '--help']: + tdLog.printNoPrefix( + 'A collection of test cases written using Python') + tdLog.printNoPrefix('-l logSql Flag') + tdLog.printNoPrefix('-s stop All dnodes') + tdLog.printNoPrefix('-g valgrind Test Flag') + sys.exit(0)
+ + if key in ['-l', '--logSql']: + if (value.upper() == "TRUE"): + logSql = True + elif (value.upper() == "FALSE"): + logSql = False + else: + tdLog.printNoPrefix("logSql value %s is invalid" % value) + sys.exit(0) + + if key in ['-g', '--valgrind']: + valgrind = 1 + + if key in ['-s', '--stop']: + stop = 1
+ + if (stop != 0): + if (valgrind == 0): + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output(psCmd, shell=True) + + while(processID): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + for port in range(6030, 6041): + usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port + processID = subprocess.check_output(usePortPID, 
shell=True) + + if processID: + killCmd = "kill -9 %s" % processID + os.system(killCmd) + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if valgrind: + time.sleep(2) + + tdLog.info('stop All dnodes') + sys.exit(0) + + tdDnodes.init(deployPath) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + + tdDnodes.stopAll() + tdDnodes.addSimExtraCfg("compressMsgSize", "-1") + tdDnodes.deploy(1) + tdDnodes.start(1) + + host = '127.0.0.1' + + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + + tdCases.logSql(logSql) + + conn = taos.connect( + host, + config=tdDnodes.getSimCfgPath()) + + tdSql.init(conn.cursor(), logSql) + + tdSql.execute("CREATE DATABASE IF NOT EXISTS t10b") + tdSql.execute("USE t10b") + tdSql.execute( + "CREATE TABLE IF NOT EXISTS s_sensor_info (ts TIMESTAMP, temperature INT, humidity FLOAT)") + + start_time = int(round(time.time() * 1000)) + for i in range(1, 1000): + tdSql.execute("IMPORT INTO s_sensor_info VALUES (1575129600000, 16, 19.405090) (1575129601000, 22, 14.377142) (1575129602000, 16, 16.868231) (1575129603000, 20, 11.565193) (1575129604000, 31, 13.009119) (1575129605000, 29, 18.136400) (1575129606000, 17, 13.806572) (1575129607000, 23, 14.688898) (1575129608000, 26, 12.931019) (1575129609000, 32, 12.185531) (1575129610000, 30, 13.608714) (1575129611000, 23, 18.624914) (1575129612000, 22, 12.970826) (1575129613000, 22, 12.065827) (1575129614000, 25, 16.967192) (1575129615000, 16, 10.283031) (1575129616000, 22, 16.072535) (1575129617000, 24, 10.794536) (1575129618000, 32, 10.591207) (1575129619000, 20, 13.015227) (1575129620000, 28, 15.410999) (1575129621000, 29, 12.785076) (1575129622000, 28, 15.305857) (1575129623000, 33, 12.820810) (1575129624000, 34, 13.618055) (1575129625000, 32, 12.971123) (1575129626000, 24, 10.974546) (1575129627000, 15, 10.742910) (1575129628000, 23, 16.810783) (1575129629000, 18, 13.115224) (1575129630000, 26, 17.418489) (1575129631000, 20, 17.302315) (1575129632000, 21, 14.283571) (1575129633000, 16, 16.826534) (1575129634000, 18, 19.222122) (1575129635000, 18, 14.931420) (1575129636000, 17, 19.549454) (1575129637000, 22, 16.908388) (1575129638000, 32, 15.637796) (1575129639000, 31, 15.517650) (1575129640000, 18, 14.038033) (1575129641000, 32, 19.859648) (1575129642000, 16, 13.220840) (1575129643000, 28, 16.445398) (1575129644000, 26, 16.695753) (1575129645000, 33, 13.696928) (1575129646000, 21, 15.352819) (1575129647000, 15, 12.388407) (1575129648000, 27, 11.267529) (1575129649000, 20, 14.103228) (1575129650000, 20, 16.250950) (1575129651000, 30, 16.236088) (1575129652000, 22, 18.305340) (1575129653000, 25, 17.360685) (1575129654000, 25, 14.978681) (1575129655000, 33, 14.096183) (1575129656000, 26, 10.019039) (1575129657000, 19, 19.158213) (1575129658000, 22, 15.593924) (1575129659000, 26, 18.780119) (1575129660000, 21, 16.001656) (1575129661000, 16, 18.458328) (1575129662000, 21, 16.417843) (1575129663000, 28, 11.736558) (1575129664000, 34, 18.143946) (1575129665000, 27, 10.303225) (1575129666000, 20, 19.756748) (1575129667000, 22, 12.940063) (1575129668000, 23, 11.509640) (1575129669000, 19, 18.319309) (1575129670000, 19, 16.278345) (1575129671000, 27, 10.898361) (1575129672000, 31, 13.922162) (1575129673000, 15, 19.296116) (1575129674000, 26, 15.885763) (1575129675000, 15, 15.525804) (1575129676000, 19, 19.579538) (1575129677000, 20, 11.073811) (1575129678000, 16, 13.932510) (1575129679000, 17, 11.900328) (1575129680000, 22, 16.540415) (1575129681000, 33, 15.203803) (1575129682000, 17, 
11.518434) (1575129683000, 17, 13.152081) (1575129684000, 18, 11.378041) (1575129685000, 21, 15.390745) (1575129686000, 30, 15.127818) (1575129687000, 19, 16.530401) (1575129688000, 32, 16.542702) (1575129689000, 26, 16.366442) (1575129690000, 25, 10.306822) (1575129691000, 15, 13.691117) (1575129692000, 15, 13.476817) (1575129693000, 25, 12.529998) (1575129694000, 22, 15.550021) (1575129695000, 20, 15.064971) (1575129696000, 24, 13.313683) (1575129697000, 23, 17.002878) (1575129698000, 30, 19.991594) (1575129699000, 15, 11.116746) (1575129699990, 16, 19.405090) (1575129700990, 22, 14.377142) (1575129701990, 16, 16.868231) (1575129702990, 20, 11.565193) (1575129703990, 31, 13.009119) (1575129704990, 29, 18.136400) (1575129705990, 17, 13.806572) (1575129706990, 23, 14.688898) (1575129707990, 26, 12.931019) (1575129708990, 32, 12.185531) (1575129709990, 30, 13.608714) (1575129710990, 23, 18.624914) (1575129711990, 22, 12.970826) (1575129712990, 22, 12.065827) (1575129713990, 25, 16.967192) (1575129714990, 16, 10.283031) (1575129715990, 22, 16.072535) (1575129716990, 24, 10.794536) (1575129717990, 32, 10.591207) (1575129718990, 20, 13.015227) (1575129719990, 28, 15.410999) (1575129720990, 29, 12.785076) (1575129721990, 28, 15.305857) (1575129722990, 33, 12.820810) (1575129723990, 34, 13.618055) (1575129724990, 32, 12.971123) (1575129725990, 24, 10.974546) (1575129726990, 15, 10.742910) (1575129727990, 23, 16.810783) (1575129728990, 18, 13.115224) (1575129729990, 26, 17.418489) (1575129730990, 20, 17.302315) (1575129731990, 21, 14.283571) (1575129732990, 16, 16.826534) (1575129733990, 18, 19.222122) (1575129734990, 18, 14.931420) (1575129735990, 17, 19.549454) (1575129736990, 22, 16.908388) (1575129737990, 32, 15.637796) (1575129738990, 31, 15.517650) (1575129739990, 18, 14.038033) (1575129740990, 32, 19.859648) (1575129741990, 16, 13.220840) (1575129742990, 28, 16.445398) (1575129743990, 26, 16.695753) (1575129744990, 33, 13.696928) (1575129745990, 21, 15.352819) (1575129746990, 15, 12.388407) (1575129747990, 27, 11.267529) (1575129748990, 20, 14.103228) (1575129749990, 20, 16.250950) (1575129750990, 30, 16.236088) (1575129751990, 22, 18.305340) (1575129752990, 25, 17.360685) (1575129753990, 25, 14.978681) (1575129754990, 33, 14.096183) (1575129755990, 26, 10.019039) (1575129756990, 19, 19.158213) (1575129757990, 22, 15.593924) (1575129758990, 26, 18.780119) (1575129759990, 21, 16.001656) (1575129760990, 16, 18.458328) (1575129761990, 21, 16.417843) (1575129762990, 28, 11.736558) (1575129763990, 34, 18.143946) (1575129764990, 27, 10.303225) (1575129765990, 20, 19.756748) (1575129766990, 22, 12.940063) (1575129767990, 23, 11.509640) (1575129768990, 19, 18.319309) (1575129769990, 19, 16.278345) (1575129770990, 27, 10.898361) (1575129771990, 31, 13.922162) (1575129772990, 15, 19.296116) (1575129773990, 26, 15.885763) (1575129774990, 15, 15.525804) (1575129775990, 19, 19.579538) (1575129776990, 20, 11.073811) (1575129777990, 16, 13.932510) (1575129778990, 17, 11.900328) (1575129779990, 22, 16.540415) (1575129780990, 33, 15.203803) (1575129781990, 17, 11.518434) (1575129782990, 17, 13.152081) (1575129783990, 18, 11.378041) (1575129784990, 21, 15.390745) (1575129785990, 30, 15.127818) (1575129786990, 19, 16.530401) (1575129787990, 32, 16.542702) (1575129788990, 26, 16.366442) (1575129789990, 25, 10.306822) (1575129790990, 15, 13.691117) (1575129791990, 15, 13.476817) (1575129792990, 25, 12.529998) (1575129793990, 22, 15.550021) (1575129794990, 20, 15.064971) (1575129795990, 24, 13.313683) 
(1575129796990, 23, 17.002878) (1575129797990, 30, 19.991594) (1575129798990, 15, 11.116746) (1575129799980, 16, 19.405090) (1575129800980, 22, 14.377142) (1575129801980, 16, 16.868231) (1575129802980, 20, 11.565193) (1575129803980, 31, 13.009119) (1575129804980, 29, 18.136400) (1575129805980, 17, 13.806572) (1575129806980, 23, 14.688898) (1575129807980, 26, 12.931019) (1575129808980, 32, 12.185531) (1575129809980, 30, 13.608714) (1575129810980, 23, 18.624914) (1575129811980, 22, 12.970826) (1575129812980, 22, 12.065827) (1575129813980, 25, 16.967192) (1575129814980, 16, 10.283031) (1575129815980, 22, 16.072535) (1575129816980, 24, 10.794536) (1575129817980, 32, 10.591207) (1575129818980, 20, 13.015227) (1575129819980, 28, 15.410999) (1575129820980, 29, 12.785076) (1575129821980, 28, 15.305857) (1575129822980, 33, 12.820810) (1575129823980, 34, 13.618055) (1575129824980, 32, 12.971123) (1575129825980, 24, 10.974546) (1575129826980, 15, 10.742910) (1575129827980, 23, 16.810783) (1575129828980, 18, 13.115224) (1575129829980, 26, 17.418489) (1575129830980, 20, 17.302315) (1575129831980, 21, 14.283571) (1575129832980, 16, 16.826534) (1575129833980, 18, 19.222122) (1575129834980, 18, 14.931420) (1575129835980, 17, 19.549454) (1575129836980, 22, 16.908388) (1575129837980, 32, 15.637796) (1575129838980, 31, 15.517650) (1575129839980, 18, 14.038033) (1575129840980, 32, 19.859648) (1575129841980, 16, 13.220840) (1575129842980, 28, 16.445398) (1575129843980, 26, 16.695753) (1575129844980, 33, 13.696928) (1575129845980, 21, 15.352819) (1575129846980, 15, 12.388407) (1575129847980, 27, 11.267529) (1575129848980, 20, 14.103228) (1575129849980, 20, 16.250950) (1575129850980, 30, 16.236088) (1575129851980, 22, 18.305340) (1575129852980, 25, 17.360685) (1575129853980, 25, 14.978681) (1575129854980, 33, 14.096183) (1575129855980, 26, 10.019039) (1575129856980, 19, 19.158213) (1575129857980, 22, 15.593924) (1575129858980, 26, 18.780119) (1575129859980, 21, 16.001656) (1575129860980, 16, 18.458328) (1575129861980, 21, 16.417843) (1575129862980, 28, 11.736558) (1575129863980, 34, 18.143946) (1575129864980, 27, 10.303225) (1575129865980, 20, 19.756748) (1575129866980, 22, 12.940063) (1575129867980, 23, 11.509640) (1575129868980, 19, 18.319309) (1575129869980, 19, 16.278345) (1575129870980, 27, 10.898361) (1575129871980, 31, 13.922162) (1575129872980, 15, 19.296116) (1575129873980, 26, 15.885763) (1575129874980, 15, 15.525804) (1575129875980, 19, 19.579538) (1575129876980, 20, 11.073811) (1575129877980, 16, 13.932510) (1575129878980, 17, 11.900328) (1575129879980, 22, 16.540415) (1575129880980, 33, 15.203803) (1575129881980, 17, 11.518434) (1575129882980, 17, 13.152081) (1575129883980, 18, 11.378041) (1575129884980, 21, 15.390745) (1575129885980, 30, 15.127818) (1575129886980, 19, 16.530401) (1575129887980, 32, 16.542702) (1575129888980, 26, 16.366442) (1575129889980, 25, 10.306822) (1575129890980, 15, 13.691117) (1575129891980, 15, 13.476817) (1575129892980, 25, 12.529998) (1575129893980, 22, 15.550021) (1575129894980, 20, 15.064971) (1575129895980, 24, 13.313683) (1575129896980, 23, 17.002878) (1575129897980, 30, 19.991594) (1575129898980, 15, 11.116746) (1575129899970, 16, 19.405090) (1575129900970, 22, 14.377142) (1575129901970, 16, 16.868231) (1575129902970, 20, 11.565193) (1575129903970, 31, 13.009119) (1575129904970, 29, 18.136400) (1575129905970, 17, 13.806572) (1575129906970, 23, 14.688898) (1575129907970, 26, 12.931019) (1575129908970, 32, 12.185531) (1575129909970, 30, 13.608714) (1575129910970, 23, 
18.624914) (1575129911970, 22, 12.970826) (1575129912970, 22, 12.065827) (1575129913970, 25, 16.967192) (1575129914970, 16, 10.283031) (1575129915970, 22, 16.072535) (1575129916970, 24, 10.794536) (1575129917970, 32, 10.591207) (1575129918970, 20, 13.015227) (1575129919970, 28, 15.410999) (1575129920970, 29, 12.785076) (1575129921970, 28, 15.305857) (1575129922970, 33, 12.820810) (1575129923970, 34, 13.618055) (1575129924970, 32, 12.971123) (1575129925970, 24, 10.974546) (1575129926970, 15, 10.742910) (1575129927970, 23, 16.810783) (1575129928970, 18, 13.115224) (1575129929970, 26, 17.418489) (1575129930970, 20, 17.302315) (1575129931970, 21, 14.283571) (1575129932970, 16, 16.826534) (1575129933970, 18, 19.222122) (1575129934970, 18, 14.931420) (1575129935970, 17, 19.549454) (1575129936970, 22, 16.908388) (1575129937970, 32, 15.637796) (1575129938970, 31, 15.517650) (1575129939970, 18, 14.038033) (1575129940970, 32, 19.859648) (1575129941970, 16, 13.220840) (1575129942970, 28, 16.445398) (1575129943970, 26, 16.695753) (1575129944970, 33, 13.696928) (1575129945970, 21, 15.352819) (1575129946970, 15, 12.388407) (1575129947970, 27, 11.267529) (1575129948970, 20, 14.103228) (1575129949970, 20, 16.250950) (1575129950970, 30, 16.236088) (1575129951970, 22, 18.305340) (1575129952970, 25, 17.360685) (1575129953970, 25, 14.978681) (1575129954970, 33, 14.096183) (1575129955970, 26, 10.019039) (1575129956970, 19, 19.158213) (1575129957970, 22, 15.593924) (1575129958970, 26, 18.780119) (1575129959970, 21, 16.001656) (1575129960970, 16, 18.458328) (1575129961970, 21, 16.417843) (1575129962970, 28, 11.736558) (1575129963970, 34, 18.143946) (1575129964970, 27, 10.303225) (1575129965970, 20, 19.756748) (1575129966970, 22, 12.940063) (1575129967970, 23, 11.509640) (1575129968970, 19, 18.319309) (1575129969970, 19, 16.278345) (1575129970970, 27, 10.898361) (1575129971970, 31, 13.922162) (1575129972970, 15, 19.296116) (1575129973970, 26, 15.885763) (1575129974970, 15, 15.525804) (1575129975970, 19, 19.579538) (1575129976970, 20, 11.073811) (1575129977970, 16, 13.932510) (1575129978970, 17, 11.900328) (1575129979970, 22, 16.540415) (1575129980970, 33, 15.203803) (1575129981970, 17, 11.518434) (1575129982970, 17, 13.152081) (1575129983970, 18, 11.378041) (1575129984970, 21, 15.390745) (1575129985970, 30, 15.127818) (1575129986970, 19, 16.530401) (1575129987970, 32, 16.542702) (1575129988970, 26, 16.366442) (1575129989970, 25, 10.306822) (1575129990970, 15, 13.691117) (1575129991970, 15, 13.476817) (1575129992970, 25, 12.529998) (1575129993970, 22, 15.550021) (1575129994970, 20, 15.064971) (1575129995970, 24, 13.313683) (1575129996970, 23, 17.002878) (1575129997970, 30, 19.991594) (1575129998970, 15, 11.116746) (1575129999960, 16, 19.405090) (1575130000960, 22, 14.377142) (1575130001960, 16, 16.868231) (1575130002960, 20, 11.565193) (1575130003960, 31, 13.009119) (1575130004960, 29, 18.136400) (1575130005960, 17, 13.806572) (1575130006960, 23, 14.688898) (1575130007960, 26, 12.931019) (1575130008960, 32, 12.185531) (1575130009960, 30, 13.608714) (1575130010960, 23, 18.624914) (1575130011960, 22, 12.970826) (1575130012960, 22, 12.065827) (1575130013960, 25, 16.967192) (1575130014960, 16, 10.283031) (1575130015960, 22, 16.072535) (1575130016960, 24, 10.794536) (1575130017960, 32, 10.591207) (1575130018960, 20, 13.015227) (1575130019960, 28, 15.410999) (1575130020960, 29, 12.785076) (1575130021960, 28, 15.305857) (1575130022960, 33, 12.820810) (1575130023960, 34, 13.618055) (1575130024960, 32, 12.971123) 
(1575130025960, 24, 10.974546) (1575130026960, 15, 10.742910) (1575130027960, 23, 16.810783) (1575130028960, 18, 13.115224) (1575130029960, 26, 17.418489) (1575130030960, 20, 17.302315) (1575130031960, 21, 14.283571) (1575130032960, 16, 16.826534) (1575130033960, 18, 19.222122) (1575130034960, 18, 14.931420) (1575130035960, 17, 19.549454) (1575130036960, 22, 16.908388) (1575130037960, 32, 15.637796) (1575130038960, 31, 15.517650) (1575130039960, 18, 14.038033) (1575130040960, 32, 19.859648) (1575130041960, 16, 13.220840) (1575130042960, 28, 16.445398) (1575130043960, 26, 16.695753) (1575130044960, 33, 13.696928) (1575130045960, 21, 15.352819) (1575130046960, 15, 12.388407) (1575130047960, 27, 11.267529) (1575130048960, 20, 14.103228) (1575130049960, 20, 16.250950) (1575130050960, 30, 16.236088) (1575130051960, 22, 18.305340) (1575130052960, 25, 17.360685) (1575130053960, 25, 14.978681) (1575130054960, 33, 14.096183) (1575130055960, 26, 10.019039) (1575130056960, 19, 19.158213) (1575130057960, 22, 15.593924) (1575130058960, 26, 18.780119) (1575130059960, 21, 16.001656) (1575130060960, 16, 18.458328) (1575130061960, 21, 16.417843) (1575130062960, 28, 11.736558) (1575130063960, 34, 18.143946) (1575130064960, 27, 10.303225) (1575130065960, 20, 19.756748) (1575130066960, 22, 12.940063) (1575130067960, 23, 11.509640) (1575130068960, 19, 18.319309) (1575130069960, 19, 16.278345) (1575130070960, 27, 10.898361) (1575130071960, 31, 13.922162) (1575130072960, 15, 19.296116) (1575130073960, 26, 15.885763) (1575130074960, 15, 15.525804) (1575130075960, 19, 19.579538) (1575130076960, 20, 11.073811) (1575130077960, 16, 13.932510) (1575130078960, 17, 11.900328) (1575130079960, 22, 16.540415) (1575130080960, 33, 15.203803) (1575130081960, 17, 11.518434) (1575130082960, 17, 13.152081) (1575130083960, 18, 11.378041) (1575130084960, 21, 15.390745) (1575130085960, 30, 15.127818) (1575130086960, 19, 16.530401) (1575130087960, 32, 16.542702) (1575130088960, 26, 16.366442) (1575130089960, 25, 10.306822) (1575130090960, 15, 13.691117) (1575130091960, 15, 13.476817) (1575130092960, 25, 12.529998) (1575130093960, 22, 15.550021) (1575130094960, 20, 15.064971) (1575130095960, 24, 13.313683) (1575130096960, 23, 17.002878) (1575130097960, 30, 19.991594) (1575130098960, 15, 11.116746) (1575130099950, 16, 19.405090) (1575130100950, 22, 14.377142) (1575130101950, 16, 16.868231) (1575130102950, 20, 11.565193) (1575130103950, 31, 13.009119) (1575130104950, 29, 18.136400) (1575130105950, 17, 13.806572) (1575130106950, 23, 14.688898) (1575130107950, 26, 12.931019) (1575130108950, 32, 12.185531) (1575130109950, 30, 13.608714) (1575130110950, 23, 18.624914) (1575130111950, 22, 12.970826) (1575130112950, 22, 12.065827) (1575130113950, 25, 16.967192) (1575130114950, 16, 10.283031) (1575130115950, 22, 16.072535) (1575130116950, 24, 10.794536) (1575130117950, 32, 10.591207) (1575130118950, 20, 13.015227) (1575130119950, 28, 15.410999) (1575130120950, 29, 12.785076) (1575130121950, 28, 15.305857) (1575130122950, 33, 12.820810) (1575130123950, 34, 13.618055) (1575130124950, 32, 12.971123) (1575130125950, 24, 10.974546) (1575130126950, 15, 10.742910) (1575130127950, 23, 16.810783) (1575130128950, 18, 13.115224) (1575130129950, 26, 17.418489) (1575130130950, 20, 17.302315) (1575130131950, 21, 14.283571) (1575130132950, 16, 16.826534) (1575130133950, 18, 19.222122) (1575130134950, 18, 14.931420) (1575130135950, 17, 19.549454) (1575130136950, 22, 16.908388) (1575130137950, 32, 15.637796) (1575130138950, 31, 15.517650) (1575130139950, 18, 
14.038033) (1575130140950, 32, 19.859648) (1575130141950, 16, 13.220840) (1575130142950, 28, 16.445398) (1575130143950, 26, 16.695753) (1575130144950, 33, 13.696928) (1575130145950, 21, 15.352819) (1575130146950, 15, 12.388407) (1575130147950, 27, 11.267529) (1575130148950, 20, 14.103228) (1575130149950, 20, 16.250950) (1575130150950, 30, 16.236088) (1575130151950, 22, 18.305340) (1575130152950, 25, 17.360685) (1575130153950, 25, 14.978681) (1575130154950, 33, 14.096183) (1575130155950, 26, 10.019039) (1575130156950, 19, 19.158213) (1575130157950, 22, 15.593924) (1575130158950, 26, 18.780119) (1575130159950, 21, 16.001656) (1575130160950, 16, 18.458328) (1575130161950, 21, 16.417843) (1575130162950, 28, 11.736558) (1575130163950, 34, 18.143946) (1575130164950, 27, 10.303225) (1575130165950, 20, 19.756748) (1575130166950, 22, 12.940063) (1575130167950, 23, 11.509640) (1575130168950, 19, 18.319309) (1575130169950, 19, 16.278345) (1575130170950, 27, 10.898361) (1575130171950, 31, 13.922162) (1575130172950, 15, 19.296116) (1575130173950, 26, 15.885763) (1575130174950, 15, 15.525804) (1575130175950, 19, 19.579538) (1575130176950, 20, 11.073811) (1575130177950, 16, 13.932510) (1575130178950, 17, 11.900328) (1575130179950, 22, 16.540415) (1575130180950, 33, 15.203803) (1575130181950, 17, 11.518434) (1575130182950, 17, 13.152081) (1575130183950, 18, 11.378041) (1575130184950, 21, 15.390745) (1575130185950, 30, 15.127818) (1575130186950, 19, 16.530401) (1575130187950, 32, 16.542702) (1575130188950, 26, 16.366442) (1575130189950, 25, 10.306822) (1575130190950, 15, 13.691117) (1575130191950, 15, 13.476817) (1575130192950, 25, 12.529998) (1575130193950, 22, 15.550021) (1575130194950, 20, 15.064971) (1575130195950, 24, 13.313683) (1575130196950, 23, 17.002878) (1575130197950, 30, 19.991594) (1575130198950, 15, 11.116746) (1575130199940, 16, 19.405090) (1575130200940, 22, 14.377142) (1575130201940, 16, 16.868231) (1575130202940, 20, 11.565193) (1575130203940, 31, 13.009119) (1575130204940, 29, 18.136400) (1575130205940, 17, 13.806572) (1575130206940, 23, 14.688898) (1575130207940, 26, 12.931019) (1575130208940, 32, 12.185531) (1575130209940, 30, 13.608714) (1575130210940, 23, 18.624914) (1575130211940, 22, 12.970826) (1575130212940, 22, 12.065827) (1575130213940, 25, 16.967192) (1575130214940, 16, 10.283031) (1575130215940, 22, 16.072535) (1575130216940, 24, 10.794536) (1575130217940, 32, 10.591207) (1575130218940, 20, 13.015227) (1575130219940, 28, 15.410999) (1575130220940, 29, 12.785076) (1575130221940, 28, 15.305857) (1575130222940, 33, 12.820810) (1575130223940, 34, 13.618055) (1575130224940, 32, 12.971123) (1575130225940, 24, 10.974546) (1575130226940, 15, 10.742910) (1575130227940, 23, 16.810783) (1575130228940, 18, 13.115224) (1575130229940, 26, 17.418489) (1575130230940, 20, 17.302315) (1575130231940, 21, 14.283571) (1575130232940, 16, 16.826534) (1575130233940, 18, 19.222122) (1575130234940, 18, 14.931420) (1575130235940, 17, 19.549454) (1575130236940, 22, 16.908388) (1575130237940, 32, 15.637796) (1575130238940, 31, 15.517650) (1575130239940, 18, 14.038033) (1575130240940, 32, 19.859648) (1575130241940, 16, 13.220840) (1575130242940, 28, 16.445398) (1575130243940, 26, 16.695753) (1575130244940, 33, 13.696928) (1575130245940, 21, 15.352819) (1575130246940, 15, 12.388407) (1575130247940, 27, 11.267529) (1575130248940, 20, 14.103228) (1575130249940, 20, 16.250950) (1575130250940, 30, 16.236088) (1575130251940, 22, 18.305340) (1575130252940, 25, 17.360685) (1575130253940, 25, 14.978681) 
(1575130254940, 33, 14.096183) (1575130255940, 26, 10.019039) (1575130256940, 19, 19.158213) (1575130257940, 22, 15.593924) (1575130258940, 26, 18.780119) (1575130259940, 21, 16.001656) (1575130260940, 16, 18.458328) (1575130261940, 21, 16.417843) (1575130262940, 28, 11.736558) (1575130263940, 34, 18.143946) (1575130264940, 27, 10.303225) (1575130265940, 20, 19.756748) (1575130266940, 22, 12.940063) (1575130267940, 23, 11.509640) (1575130268940, 19, 18.319309) (1575130269940, 19, 16.278345) (1575130270940, 27, 10.898361) (1575130271940, 31, 13.922162) (1575130272940, 15, 19.296116) (1575130273940, 26, 15.885763) (1575130274940, 15, 15.525804) (1575130275940, 19, 19.579538) (1575130276940, 20, 11.073811) (1575130277940, 16, 13.932510) (1575130278940, 17, 11.900328) (1575130279940, 22, 16.540415) (1575130280940, 33, 15.203803) (1575130281940, 17, 11.518434) (1575130282940, 17, 13.152081) (1575130283940, 18, 11.378041) (1575130284940, 21, 15.390745) (1575130285940, 30, 15.127818) (1575130286940, 19, 16.530401) (1575130287940, 32, 16.542702) (1575130288940, 26, 16.366442) (1575130289940, 25, 10.306822) (1575130290940, 15, 13.691117) (1575130291940, 15, 13.476817) (1575130292940, 25, 12.529998) (1575130293940, 22, 15.550021) (1575130294940, 20, 15.064971) (1575130295940, 24, 13.313683) (1575130296940, 23, 17.002878) (1575130297940, 30, 19.991594) (1575130298940, 15, 11.116746) (1575130299930, 16, 19.405090) (1575130300930, 22, 14.377142) (1575130301930, 16, 16.868231) (1575130302930, 20, 11.565193) (1575130303930, 31, 13.009119) (1575130304930, 29, 18.136400) (1575130305930, 17, 13.806572) (1575130306930, 23, 14.688898) (1575130307930, 26, 12.931019) (1575130308930, 32, 12.185531) (1575130309930, 30, 13.608714) (1575130310930, 23, 18.624914) (1575130311930, 22, 12.970826) (1575130312930, 22, 12.065827) (1575130313930, 25, 16.967192) (1575130314930, 16, 10.283031) (1575130315930, 22, 16.072535) (1575130316930, 24, 10.794536) (1575130317930, 32, 10.591207) (1575130318930, 20, 13.015227) (1575130319930, 28, 15.410999) (1575130320930, 29, 12.785076) (1575130321930, 28, 15.305857) (1575130322930, 33, 12.820810) (1575130323930, 34, 13.618055) (1575130324930, 32, 12.971123) (1575130325930, 24, 10.974546) (1575130326930, 15, 10.742910) (1575130327930, 23, 16.810783) (1575130328930, 18, 13.115224) (1575130329930, 26, 17.418489) (1575130330930, 20, 17.302315) (1575130331930, 21, 14.283571) (1575130332930, 16, 16.826534) (1575130333930, 18, 19.222122) (1575130334930, 18, 14.931420) (1575130335930, 17, 19.549454) (1575130336930, 22, 16.908388) (1575130337930, 32, 15.637796) (1575130338930, 31, 15.517650) (1575130339930, 18, 14.038033) (1575130340930, 32, 19.859648) (1575130341930, 16, 13.220840) (1575130342930, 28, 16.445398) (1575130343930, 26, 16.695753) (1575130344930, 33, 13.696928) (1575130345930, 21, 15.352819) (1575130346930, 15, 12.388407) (1575130347930, 27, 11.267529) (1575130348930, 20, 14.103228) (1575130349930, 20, 16.250950) (1575130350930, 30, 16.236088) (1575130351930, 22, 18.305340) (1575130352930, 25, 17.360685) (1575130353930, 25, 14.978681) (1575130354930, 33, 14.096183) (1575130355930, 26, 10.019039) (1575130356930, 19, 19.158213) (1575130357930, 22, 15.593924) (1575130358930, 26, 18.780119) (1575130359930, 21, 16.001656) (1575130360930, 16, 18.458328) (1575130361930, 21, 16.417843) (1575130362930, 28, 11.736558) (1575130363930, 34, 18.143946) (1575130364930, 27, 10.303225) (1575130365930, 20, 19.756748) (1575130366930, 22, 12.940063) (1575130367930, 23, 11.509640) (1575130368930, 19, 
18.319309) (1575130369930, 19, 16.278345) (1575130370930, 27, 10.898361) (1575130371930, 31, 13.922162) (1575130372930, 15, 19.296116) (1575130373930, 26, 15.885763) (1575130374930, 15, 15.525804) (1575130375930, 19, 19.579538) (1575130376930, 20, 11.073811) (1575130377930, 16, 13.932510) (1575130378930, 17, 11.900328) (1575130379930, 22, 16.540415) (1575130380930, 33, 15.203803) (1575130381930, 17, 11.518434) (1575130382930, 17, 13.152081) (1575130383930, 18, 11.378041) (1575130384930, 21, 15.390745) (1575130385930, 30, 15.127818) (1575130386930, 19, 16.530401) (1575130387930, 32, 16.542702) (1575130388930, 26, 16.366442) (1575130389930, 25, 10.306822) (1575130390930, 15, 13.691117) (1575130391930, 15, 13.476817) (1575130392930, 25, 12.529998) (1575130393930, 22, 15.550021) (1575130394930, 20, 15.064971) (1575130395930, 24, 13.313683) (1575130396930, 23, 17.002878) (1575130397930, 30, 19.991594) (1575130398930, 15, 11.116746) (1575130399920, 16, 19.405090) (1575130400920, 22, 14.377142) (1575130401920, 16, 16.868231) (1575130402920, 20, 11.565193) (1575130403920, 31, 13.009119) (1575130404920, 29, 18.136400) (1575130405920, 17, 13.806572) (1575130406920, 23, 14.688898) (1575130407920, 26, 12.931019) (1575130408920, 32, 12.185531) (1575130409920, 30, 13.608714) (1575130410920, 23, 18.624914) (1575130411920, 22, 12.970826) (1575130412920, 22, 12.065827) (1575130413920, 25, 16.967192) (1575130414920, 16, 10.283031) (1575130415920, 22, 16.072535) (1575130416920, 24, 10.794536) (1575130417920, 32, 10.591207) (1575130418920, 20, 13.015227) (1575130419920, 28, 15.410999) (1575130420920, 29, 12.785076) (1575130421920, 28, 15.305857) (1575130422920, 33, 12.820810) (1575130423920, 34, 13.618055) (1575130424920, 32, 12.971123) (1575130425920, 24, 10.974546) (1575130426920, 15, 10.742910) (1575130427920, 23, 16.810783) (1575130428920, 18, 13.115224) (1575130429920, 26, 17.418489) (1575130430920, 20, 17.302315) (1575130431920, 21, 14.283571) (1575130432920, 16, 16.826534) (1575130433920, 18, 19.222122) (1575130434920, 18, 14.931420) (1575130435920, 17, 19.549454) (1575130436920, 22, 16.908388) (1575130437920, 32, 15.637796) (1575130438920, 31, 15.517650) (1575130439920, 18, 14.038033) (1575130440920, 32, 19.859648) (1575130441920, 16, 13.220840) (1575130442920, 28, 16.445398) (1575130443920, 26, 16.695753) (1575130444920, 33, 13.696928) (1575130445920, 21, 15.352819) (1575130446920, 15, 12.388407) (1575130447920, 27, 11.267529) (1575130448920, 20, 14.103228) (1575130449920, 20, 16.250950) (1575130450920, 30, 16.236088) (1575130451920, 22, 18.305340) (1575130452920, 25, 17.360685) (1575130453920, 25, 14.978681) (1575130454920, 33, 14.096183) (1575130455920, 26, 10.019039) (1575130456920, 19, 19.158213) (1575130457920, 22, 15.593924) (1575130458920, 26, 18.780119) (1575130459920, 21, 16.001656) (1575130460920, 16, 18.458328) (1575130461920, 21, 16.417843) (1575130462920, 28, 11.736558) (1575130463920, 34, 18.143946) (1575130464920, 27, 10.303225) (1575130465920, 20, 19.756748) (1575130466920, 22, 12.940063) (1575130467920, 23, 11.509640) (1575130468920, 19, 18.319309) (1575130469920, 19, 16.278345) (1575130470920, 27, 10.898361) (1575130471920, 31, 13.922162) (1575130472920, 15, 19.296116) (1575130473920, 26, 15.885763) (1575130474920, 15, 15.525804) (1575130475920, 19, 19.579538) (1575130476920, 20, 11.073811) (1575130477920, 16, 13.932510) (1575130478920, 17, 11.900328) (1575130479920, 22, 16.540415) (1575130480920, 33, 15.203803) (1575130481920, 17, 11.518434) (1575130482920, 17, 13.152081) 
(1575130483920, 18, 11.378041) (1575130484920, 21, 15.390745) (1575130485920, 30, 15.127818) (1575130486920, 19, 16.530401) (1575130487920, 32, 16.542702) (1575130488920, 26, 16.366442) (1575130489920, 25, 10.306822) (1575130490920, 15, 13.691117) (1575130491920, 15, 13.476817) (1575130492920, 25, 12.529998) (1575130493920, 22, 15.550021) (1575130494920, 20, 15.064971) (1575130495920, 24, 13.313683) (1575130496920, 23, 17.002878) (1575130497920, 30, 19.991594) (1575130498920, 15, 11.116746) (1575130499910, 16, 19.405090) (1575130500910, 22, 14.377142) (1575130501910, 16, 16.868231) (1575130502910, 20, 11.565193) (1575130503910, 31, 13.009119) (1575130504910, 29, 18.136400) (1575130505910, 17, 13.806572) (1575130506910, 23, 14.688898) (1575130507910, 26, 12.931019) (1575130508910, 32, 12.185531) (1575130509910, 30, 13.608714) (1575130510910, 23, 18.624914) (1575130511910, 22, 12.970826) (1575130512910, 22, 12.065827) (1575130513910, 25, 16.967192) (1575130514910, 16, 10.283031) (1575130515910, 22, 16.072535) (1575130516910, 24, 10.794536) (1575130517910, 32, 10.591207) (1575130518910, 20, 13.015227) (1575130519910, 28, 15.410999) (1575130520910, 29, 12.785076) (1575130521910, 28, 15.305857) (1575130522910, 33, 12.820810) (1575130523910, 34, 13.618055) (1575130524910, 32, 12.971123) (1575130525910, 24, 10.974546) (1575130526910, 15, 10.742910) (1575130527910, 23, 16.810783) (1575130528910, 18, 13.115224) (1575130529910, 26, 17.418489) (1575130530910, 20, 17.302315) (1575130531910, 21, 14.283571) (1575130532910, 16, 16.826534) (1575130533910, 18, 19.222122) (1575130534910, 18, 14.931420) (1575130535910, 17, 19.549454) (1575130536910, 22, 16.908388) (1575130537910, 32, 15.637796) (1575130538910, 31, 15.517650) (1575130539910, 18, 14.038033) (1575130540910, 32, 19.859648) (1575130541910, 16, 13.220840) (1575130542910, 28, 16.445398) (1575130543910, 26, 16.695753) (1575130544910, 33, 13.696928) (1575130545910, 21, 15.352819) (1575130546910, 15, 12.388407) (1575130547910, 27, 11.267529) (1575130548910, 20, 14.103228) (1575130549910, 20, 16.250950) (1575130550910, 30, 16.236088) (1575130551910, 22, 18.305340) (1575130552910, 25, 17.360685) (1575130553910, 25, 14.978681) (1575130554910, 33, 14.096183) (1575130555910, 26, 10.019039) (1575130556910, 19, 19.158213) (1575130557910, 22, 15.593924) (1575130558910, 26, 18.780119) (1575130559910, 21, 16.001656) (1575130560910, 16, 18.458328) (1575130561910, 21, 16.417843) (1575130562910, 28, 11.736558) (1575130563910, 34, 18.143946) (1575130564910, 27, 10.303225) (1575130565910, 20, 19.756748) (1575130566910, 22, 12.940063) (1575130567910, 23, 11.509640) (1575130568910, 19, 18.319309) (1575130569910, 19, 16.278345) (1575130570910, 27, 10.898361) (1575130571910, 31, 13.922162) (1575130572910, 15, 19.296116) (1575130573910, 26, 15.885763) (1575130574910, 15, 15.525804) (1575130575910, 19, 19.579538) (1575130576910, 20, 11.073811) (1575130577910, 16, 13.932510) (1575130578910, 17, 11.900328) (1575130579910, 22, 16.540415) (1575130580910, 33, 15.203803) (1575130581910, 17, 11.518434) (1575130582910, 17, 13.152081) (1575130583910, 18, 11.378041) (1575130584910, 21, 15.390745) (1575130585910, 30, 15.127818) (1575130586910, 19, 16.530401) (1575130587910, 32, 16.542702) (1575130588910, 26, 16.366442) (1575130589910, 25, 10.306822) (1575130590910, 15, 13.691117) (1575130591910, 15, 13.476817) (1575130592910, 25, 12.529998) (1575130593910, 22, 15.550021) (1575130594910, 20, 15.064971) (1575130595910, 24, 13.313683) (1575130596910, 23, 17.002878) (1575130597910, 30, 
19.991594) (1575130598910, 15, 11.116746) (1575130599900, 16, 19.405090) (1575130600900, 22, 14.377142) (1575130601900, 16, 16.868231) (1575130602900, 20, 11.565193) (1575130603900, 31, 13.009119) (1575130604900, 29, 18.136400) (1575130605900, 17, 13.806572) (1575130606900, 23, 14.688898) (1575130607900, 26, 12.931019) (1575130608900, 32, 12.185531) (1575130609900, 30, 13.608714) (1575130610900, 23, 18.624914) (1575130611900, 22, 12.970826) (1575130612900, 22, 12.065827) (1575130613900, 25, 16.967192) (1575130614900, 16, 10.283031) (1575130615900, 22, 16.072535) (1575130616900, 24, 10.794536) (1575130617900, 32, 10.591207) (1575130618900, 20, 13.015227) (1575130619900, 28, 15.410999) (1575130620900, 29, 12.785076) (1575130621900, 28, 15.305857) (1575130622900, 33, 12.820810) (1575130623900, 34, 13.618055) (1575130624900, 32, 12.971123) (1575130625900, 24, 10.974546) (1575130626900, 15, 10.742910) (1575130627900, 23, 16.810783) (1575130628900, 18, 13.115224) (1575130629900, 26, 17.418489) (1575130630900, 20, 17.302315) (1575130631900, 21, 14.283571) (1575130632900, 16, 16.826534) (1575130633900, 18, 19.222122) (1575130634900, 18, 14.931420) (1575130635900, 17, 19.549454) (1575130636900, 22, 16.908388) (1575130637900, 32, 15.637796) (1575130638900, 31, 15.517650) (1575130639900, 18, 14.038033) (1575130640900, 32, 19.859648) (1575130641900, 16, 13.220840) (1575130642900, 28, 16.445398) (1575130643900, 26, 16.695753) (1575130644900, 33, 13.696928) (1575130645900, 21, 15.352819) (1575130646900, 15, 12.388407) (1575130647900, 27, 11.267529) (1575130648900, 20, 14.103228) (1575130649900, 20, 16.250950) (1575130650900, 30, 16.236088) (1575130651900, 22, 18.305340) (1575130652900, 25, 17.360685) (1575130653900, 25, 14.978681) (1575130654900, 33, 14.096183) (1575130655900, 26, 10.019039) (1575130656900, 19, 19.158213) (1575130657900, 22, 15.593924) (1575130658900, 26, 18.780119) (1575130659900, 21, 16.001656) (1575130660900, 16, 18.458328) (1575130661900, 21, 16.417843) (1575130662900, 28, 11.736558) (1575130663900, 34, 18.143946) (1575130664900, 27, 10.303225) (1575130665900, 20, 19.756748) (1575130666900, 22, 12.940063) (1575130667900, 23, 11.509640) (1575130668900, 19, 18.319309) (1575130669900, 19, 16.278345) (1575130670900, 27, 10.898361) (1575130671900, 31, 13.922162) (1575130672900, 15, 19.296116) (1575130673900, 26, 15.885763) (1575130674900, 15, 15.525804) (1575130675900, 19, 19.579538) (1575130676900, 20, 11.073811) (1575130677900, 16, 13.932510) (1575130678900, 17, 11.900328) (1575130679900, 22, 16.540415) (1575130680900, 33, 15.203803) (1575130681900, 17, 11.518434) (1575130682900, 17, 13.152081) (1575130683900, 18, 11.378041) (1575130684900, 21, 15.390745) (1575130685900, 30, 15.127818) (1575130686900, 19, 16.530401) (1575130687900, 32, 16.542702) (1575130688900, 26, 16.366442) (1575130689900, 25, 10.306822) (1575130690900, 15, 13.691117) (1575130691900, 15, 13.476817) (1575130692900, 25, 12.529998) (1575130693900, 22, 15.550021) (1575130694900, 20, 15.064971) (1575130695900, 24, 13.313683) (1575130696900, 23, 17.002878) (1575130697900, 30, 19.991594) (1575130698900, 15, 11.116746) (1575130699890, 16, 19.405090) (1575130700890, 22, 14.377142) (1575130701890, 16, 16.868231) (1575130702890, 20, 11.565193) (1575130703890, 31, 13.009119) (1575130704890, 29, 18.136400) (1575130705890, 17, 13.806572) (1575130706890, 23, 14.688898) (1575130707890, 26, 12.931019) (1575130708890, 32, 12.185531) (1575130709890, 30, 13.608714) (1575130710890, 23, 18.624914) (1575130711890, 22, 12.970826) 
(1575130712890, 22, 12.065827) (1575130713890, 25, 16.967192) (1575130714890, 16, 10.283031) (1575130715890, 22, 16.072535) (1575130716890, 24, 10.794536) (1575130717890, 32, 10.591207) (1575130718890, 20, 13.015227) (1575130719890, 28, 15.410999) (1575130720890, 29, 12.785076) (1575130721890, 28, 15.305857) (1575130722890, 33, 12.820810) (1575130723890, 34, 13.618055) (1575130724890, 32, 12.971123) (1575130725890, 24, 10.974546) (1575130726890, 15, 10.742910) (1575130727890, 23, 16.810783) (1575130728890, 18, 13.115224) (1575130729890, 26, 17.418489) (1575130730890, 20, 17.302315) (1575130731890, 21, 14.283571) (1575130732890, 16, 16.826534) (1575130733890, 18, 19.222122) (1575130734890, 18, 14.931420) (1575130735890, 17, 19.549454) (1575130736890, 22, 16.908388) (1575130737890, 32, 15.637796) (1575130738890, 31, 15.517650) (1575130739890, 18, 14.038033) (1575130740890, 32, 19.859648) (1575130741890, 16, 13.220840) (1575130742890, 28, 16.445398) (1575130743890, 26, 16.695753) (1575130744890, 33, 13.696928) (1575130745890, 21, 15.352819) (1575130746890, 15, 12.388407) (1575130747890, 27, 11.267529) (1575130748890, 20, 14.103228) (1575130749890, 20, 16.250950) (1575130750890, 30, 16.236088) (1575130751890, 22, 18.305340) (1575130752890, 25, 17.360685) (1575130753890, 25, 14.978681) (1575130754890, 33, 14.096183) (1575130755890, 26, 10.019039) (1575130756890, 19, 19.158213) (1575130757890, 22, 15.593924) (1575130758890, 26, 18.780119) (1575130759890, 21, 16.001656) (1575130760890, 16, 18.458328) (1575130761890, 21, 16.417843) (1575130762890, 28, 11.736558) (1575130763890, 34, 18.143946) (1575130764890, 27, 10.303225) (1575130765890, 20, 19.756748) (1575130766890, 22, 12.940063) (1575130767890, 23, 11.509640) (1575130768890, 19, 18.319309) (1575130769890, 19, 16.278345) (1575130770890, 27, 10.898361) (1575130771890, 31, 13.922162) (1575130772890, 15, 19.296116) (1575130773890, 26, 15.885763) (1575130774890, 15, 15.525804) (1575130775890, 19, 19.579538) (1575130776890, 20, 11.073811) (1575130777890, 16, 13.932510) (1575130778890, 17, 11.900328) (1575130779890, 22, 16.540415) (1575130780890, 33, 15.203803) (1575130781890, 17, 11.518434) (1575130782890, 17, 13.152081) (1575130783890, 18, 11.378041) (1575130784890, 21, 15.390745) (1575130785890, 30, 15.127818) (1575130786890, 19, 16.530401) (1575130787890, 32, 16.542702) (1575130788890, 26, 16.366442) (1575130789890, 25, 10.306822) (1575130790890, 15, 13.691117) (1575130791890, 15, 13.476817) (1575130792890, 25, 12.529998) (1575130793890, 22, 15.550021) (1575130794890, 20, 15.064971) (1575130795890, 24, 13.313683) (1575130796890, 23, 17.002878) (1575130797890, 30, 19.991594) (1575130798890, 15, 11.116746) (1575130799880, 16, 19.405090) (1575130800880, 22, 14.377142) (1575130801880, 16, 16.868231) (1575130802880, 20, 11.565193) (1575130803880, 31, 13.009119) (1575130804880, 29, 18.136400) (1575130805880, 17, 13.806572) (1575130806880, 23, 14.688898) (1575130807880, 26, 12.931019) (1575130808880, 32, 12.185531) (1575130809880, 30, 13.608714) (1575130810880, 23, 18.624914) (1575130811880, 22, 12.970826) (1575130812880, 22, 12.065827) (1575130813880, 25, 16.967192) (1575130814880, 16, 10.283031) (1575130815880, 22, 16.072535) (1575130816880, 24, 10.794536) (1575130817880, 32, 10.591207) (1575130818880, 20, 13.015227) (1575130819880, 28, 15.410999) (1575130820880, 29, 12.785076) (1575130821880, 28, 15.305857) (1575130822880, 33, 12.820810) (1575130823880, 34, 13.618055) (1575130824880, 32, 12.971123) (1575130825880, 24, 10.974546) (1575130826880, 15, 
10.742910) (1575130827880, 23, 16.810783) (1575130828880, 18, 13.115224) (1575130829880, 26, 17.418489) (1575130830880, 20, 17.302315) (1575130831880, 21, 14.283571) (1575130832880, 16, 16.826534) (1575130833880, 18, 19.222122) (1575130834880, 18, 14.931420) (1575130835880, 17, 19.549454) (1575130836880, 22, 16.908388) (1575130837880, 32, 15.637796) (1575130838880, 31, 15.517650) (1575130839880, 18, 14.038033) (1575130840880, 32, 19.859648) (1575130841880, 16, 13.220840) (1575130842880, 28, 16.445398) (1575130843880, 26, 16.695753) (1575130844880, 33, 13.696928) (1575130845880, 21, 15.352819) (1575130846880, 15, 12.388407) (1575130847880, 27, 11.267529) (1575130848880, 20, 14.103228) (1575130849880, 20, 16.250950) (1575130850880, 30, 16.236088) (1575130851880, 22, 18.305340) (1575130852880, 25, 17.360685) (1575130853880, 25, 14.978681) (1575130854880, 33, 14.096183) (1575130855880, 26, 10.019039) (1575130856880, 19, 19.158213) (1575130857880, 22, 15.593924) (1575130858880, 26, 18.780119) (1575130859880, 21, 16.001656) (1575130860880, 16, 18.458328) (1575130861880, 21, 16.417843) (1575130862880, 28, 11.736558) (1575130863880, 34, 18.143946) (1575130864880, 27, 10.303225) (1575130865880, 20, 19.756748) (1575130866880, 22, 12.940063) (1575130867880, 23, 11.509640) (1575130868880, 19, 18.319309) (1575130869880, 19, 16.278345) (1575130870880, 27, 10.898361) (1575130871880, 31, 13.922162) (1575130872880, 15, 19.296116) (1575130873880, 26, 15.885763) (1575130874880, 15, 15.525804) (1575130875880, 19, 19.579538) (1575130876880, 20, 11.073811) (1575130877880, 16, 13.932510) (1575130878880, 17, 11.900328) (1575130879880, 22, 16.540415) (1575130880880, 33, 15.203803) (1575130881880, 17, 11.518434) (1575130882880, 17, 13.152081) (1575130883880, 18, 11.378041) (1575130884880, 21, 15.390745) (1575130885880, 30, 15.127818) (1575130886880, 19, 16.530401) (1575130887880, 32, 16.542702) (1575130888880, 26, 16.366442) (1575130889880, 25, 10.306822) (1575130890880, 15, 13.691117) (1575130891880, 15, 13.476817) (1575130892880, 25, 12.529998) (1575130893880, 22, 15.550021) (1575130894880, 20, 15.064971) (1575130895880, 24, 13.313683) (1575130896880, 23, 17.002878) (1575130897880, 30, 19.991594) (1575130898880, 15, 11.116746) (1575130899870, 16, 19.405090) (1575130900870, 22, 14.377142) (1575130901870, 16, 16.868231) (1575130902870, 20, 11.565193) (1575130903870, 31, 13.009119) (1575130904870, 29, 18.136400) (1575130905870, 17, 13.806572) (1575130906870, 23, 14.688898) (1575130907870, 26, 12.931019) (1575130908870, 32, 12.185531) (1575130909870, 30, 13.608714) (1575130910870, 23, 18.624914) (1575130911870, 22, 12.970826) (1575130912870, 22, 12.065827) (1575130913870, 25, 16.967192) (1575130914870, 16, 10.283031) (1575130915870, 22, 16.072535) (1575130916870, 24, 10.794536) (1575130917870, 32, 10.591207) (1575130918870, 20, 13.015227) (1575130919870, 28, 15.410999) (1575130920870, 29, 12.785076) (1575130921870, 28, 15.305857) (1575130922870, 33, 12.820810) (1575130923870, 34, 13.618055) (1575130924870, 32, 12.971123) (1575130925870, 24, 10.974546) (1575130926870, 15, 10.742910) (1575130927870, 23, 16.810783) (1575130928870, 18, 13.115224) (1575130929870, 26, 17.418489) (1575130930870, 20, 17.302315) (1575130931870, 21, 14.283571) (1575130932870, 16, 16.826534) (1575130933870, 18, 19.222122) (1575130934870, 18, 14.931420) (1575130935870, 17, 19.549454) (1575130936870, 22, 16.908388) (1575130937870, 32, 15.637796) (1575130938870, 31, 15.517650) (1575130939870, 18, 14.038033) (1575130940870, 32, 19.859648) 
(1575130941870, 16, 13.220840) (1575130942870, 28, 16.445398) (1575130943870, 26, 16.695753) (1575130944870, 33, 13.696928) (1575130945870, 21, 15.352819) (1575130946870, 15, 12.388407) (1575130947870, 27, 11.267529) (1575130948870, 20, 14.103228) (1575130949870, 20, 16.250950) (1575130950870, 30, 16.236088) (1575130951870, 22, 18.305340) (1575130952870, 25, 17.360685) (1575130953870, 25, 14.978681) (1575130954870, 33, 14.096183) (1575130955870, 26, 10.019039) (1575130956870, 19, 19.158213) (1575130957870, 22, 15.593924) (1575130958870, 26, 18.780119) (1575130959870, 21, 16.001656) (1575130960870, 16, 18.458328) (1575130961870, 21, 16.417843) (1575130962870, 28, 11.736558) (1575130963870, 34, 18.143946) (1575130964870, 27, 10.303225) (1575130965870, 20, 19.756748) (1575130966870, 22, 12.940063) (1575130967870, 23, 11.509640) (1575130968870, 19, 18.319309) (1575130969870, 19, 16.278345) (1575130970870, 27, 10.898361) (1575130971870, 31, 13.922162) (1575130972870, 15, 19.296116) (1575130973870, 26, 15.885763) (1575130974870, 15, 15.525804) (1575130975870, 19, 19.579538) (1575130976870, 20, 11.073811) (1575130977870, 16, 13.932510) (1575130978870, 17, 11.900328) (1575130979870, 22, 16.540415) (1575130980870, 33, 15.203803) (1575130981870, 17, 11.518434) (1575130982870, 17, 13.152081) (1575130983870, 18, 11.378041) (1575130984870, 21, 15.390745) (1575130985870, 30, 15.127818) (1575130986870, 19, 16.530401) (1575130987870, 32, 16.542702) (1575130988870, 26, 16.366442) (1575130989870, 25, 10.306822) (1575130990870, 15, 13.691117) (1575130991870, 15, 13.476817) (1575130992870, 25, 12.529998) (1575130993870, 22, 15.550021) (1575130994870, 20, 15.064971) (1575130995870, 24, 13.313683) (1575130996870, 23, 17.002878) (1575130997870, 30, 19.991594) (1575130998870, 15, 11.116746) (1575130999860, 16, 19.405090) (1575131000860, 22, 14.377142) (1575131001860, 16, 16.868231) (1575131002860, 20, 11.565193) (1575131003860, 31, 13.009119) (1575131004860, 29, 18.136400) (1575131005860, 17, 13.806572) (1575131006860, 23, 14.688898) (1575131007860, 26, 12.931019) (1575131008860, 32, 12.185531) (1575131009860, 30, 13.608714) (1575131010860, 23, 18.624914) (1575131011860, 22, 12.970826) (1575131012860, 22, 12.065827) (1575131013860, 25, 16.967192) (1575131014860, 16, 10.283031) (1575131015860, 22, 16.072535) (1575131016860, 24, 10.794536) (1575131017860, 32, 10.591207) (1575131018860, 20, 13.015227) (1575131019860, 28, 15.410999) (1575131020860, 29, 12.785076) (1575131021860, 28, 15.305857) (1575131022860, 33, 12.820810) (1575131023860, 34, 13.618055) (1575131024860, 32, 12.971123) (1575131025860, 24, 10.974546) (1575131026860, 15, 10.742910) (1575131027860, 23, 16.810783) (1575131028860, 18, 13.115224) (1575131029860, 26, 17.418489) (1575131030860, 20, 17.302315) (1575131031860, 21, 14.283571) (1575131032860, 16, 16.826534) (1575131033860, 18, 19.222122) (1575131034860, 18, 14.931420) (1575131035860, 17, 19.549454) (1575131036860, 22, 16.908388) (1575131037860, 32, 15.637796) (1575131038860, 31, 15.517650) (1575131039860, 18, 14.038033) (1575131040860, 32, 19.859648) (1575131041860, 16, 13.220840) (1575131042860, 28, 16.445398) (1575131043860, 26, 16.695753) (1575131044860, 33, 13.696928) (1575131045860, 21, 15.352819) (1575131046860, 15, 12.388407) (1575131047860, 27, 11.267529) (1575131048860, 20, 14.103228) (1575131049860, 20, 16.250950) (1575131050860, 30, 16.236088) (1575131051860, 22, 18.305340) (1575131052860, 25, 17.360685) (1575131053860, 25, 14.978681) (1575131054860, 33, 14.096183) (1575131055860, 26, 
10.019039) (1575131056860, 19, 19.158213) (1575131057860, 22, 15.593924) (1575131058860, 26, 18.780119) (1575131059860, 21, 16.001656) (1575131060860, 16, 18.458328) (1575131061860, 21, 16.417843) (1575131062860, 28, 11.736558) (1575131063860, 34, 18.143946) (1575131064860, 27, 10.303225) (1575131065860, 20, 19.756748) (1575131066860, 22, 12.940063) (1575131067860, 23, 11.509640) (1575131068860, 19, 18.319309) (1575131069860, 19, 16.278345) (1575131070860, 27, 10.898361) (1575131071860, 31, 13.922162) (1575131072860, 15, 19.296116) (1575131073860, 26, 15.885763) (1575131074860, 15, 15.525804) (1575131075860, 19, 19.579538) (1575131076860, 20, 11.073811) (1575131077860, 16, 13.932510) (1575131078860, 17, 11.900328) (1575131079860, 22, 16.540415) (1575131080860, 33, 15.203803) (1575131081860, 17, 11.518434) (1575131082860, 17, 13.152081) (1575131083860, 18, 11.378041) (1575131084860, 21, 15.390745) (1575131085860, 30, 15.127818) (1575131086860, 19, 16.530401) (1575131087860, 32, 16.542702) (1575131088860, 26, 16.366442) (1575131089860, 25, 10.306822) (1575131090860, 15, 13.691117) (1575131091860, 15, 13.476817) (1575131092860, 25, 12.529998) (1575131093860, 22, 15.550021) (1575131094860, 20, 15.064971) (1575131095860, 24, 13.313683) (1575131096860, 23, 17.002878) (1575131097860, 30, 19.991594) (1575131098860, 15, 11.116746) (1575131099850, 16, 19.405090) (1575131100850, 22, 14.377142) (1575131101850, 16, 16.868231) (1575131102850, 20, 11.565193) (1575131103850, 31, 13.009119) (1575131104850, 29, 18.136400) (1575131105850, 17, 13.806572) (1575131106850, 23, 14.688898) (1575131107850, 26, 12.931019) (1575131108850, 32, 12.185531) (1575131109850, 30, 13.608714) (1575131110850, 23, 18.624914) (1575131111850, 22, 12.970826) (1575131112850, 22, 12.065827) (1575131113850, 25, 16.967192) (1575131114850, 16, 10.283031) (1575131115850, 22, 16.072535) (1575131116850, 24, 10.794536) (1575131117850, 32, 10.591207) (1575131118850, 20, 13.015227) (1575131119850, 28, 15.410999) (1575131120850, 29, 12.785076) (1575131121850, 28, 15.305857) (1575131122850, 33, 12.820810) (1575131123850, 34, 13.618055) (1575131124850, 32, 12.971123) (1575131125850, 24, 10.974546) (1575131126850, 15, 10.742910) (1575131127850, 23, 16.810783) (1575131128850, 18, 13.115224) (1575131129850, 26, 17.418489) (1575131130850, 20, 17.302315) (1575131131850, 21, 14.283571) (1575131132850, 16, 16.826534) (1575131133850, 18, 19.222122) (1575131134850, 18, 14.931420) (1575131135850, 17, 19.549454) (1575131136850, 22, 16.908388) (1575131137850, 32, 15.637796) (1575131138850, 31, 15.517650) (1575131139850, 18, 14.038033) (1575131140850, 32, 19.859648) (1575131141850, 16, 13.220840) (1575131142850, 28, 16.445398) (1575131143850, 26, 16.695753) (1575131144850, 33, 13.696928) (1575131145850, 21, 15.352819) (1575131146850, 15, 12.388407) (1575131147850, 27, 11.267529) (1575131148850, 20, 14.103228) (1575131149850, 20, 16.250950) (1575131150850, 30, 16.236088) (1575131151850, 22, 18.305340) (1575131152850, 25, 17.360685) (1575131153850, 25, 14.978681) (1575131154850, 33, 14.096183) (1575131155850, 26, 10.019039) (1575131156850, 19, 19.158213) (1575131157850, 22, 15.593924) (1575131158850, 26, 18.780119) (1575131159850, 21, 16.001656) (1575131160850, 16, 18.458328) (1575131161850, 21, 16.417843) (1575131162850, 28, 11.736558) (1575131163850, 34, 18.143946) (1575131164850, 27, 10.303225) (1575131165850, 20, 19.756748) (1575131166850, 22, 12.940063) (1575131167850, 23, 11.509640) (1575131168850, 19, 18.319309) (1575131169850, 19, 16.278345) 
(1575131170850, 27, 10.898361) (1575131171850, 31, 13.922162) (1575131172850, 15, 19.296116) (1575131173850, 26, 15.885763) (1575131174850, 15, 15.525804) (1575131175850, 19, 19.579538) (1575131176850, 20, 11.073811) (1575131177850, 16, 13.932510) (1575131178850, 17, 11.900328) (1575131179850, 22, 16.540415) (1575131180850, 33, 15.203803) (1575131181850, 17, 11.518434) (1575131182850, 17, 13.152081) (1575131183850, 18, 11.378041) (1575131184850, 21, 15.390745) (1575131185850, 30, 15.127818) (1575131186850, 19, 16.530401) (1575131187850, 32, 16.542702) (1575131188850, 26, 16.366442) (1575131189850, 25, 10.306822) (1575131190850, 15, 13.691117) (1575131191850, 15, 13.476817) (1575131192850, 25, 12.529998) (1575131193850, 22, 15.550021) (1575131194850, 20, 15.064971) (1575131195850, 24, 13.313683) (1575131196850, 23, 17.002878) (1575131197850, 30, 19.991594) (1575131198850, 15, 11.116746) (1575131199840, 16, 19.405090) (1575131200840, 22, 14.377142) (1575131201840, 16, 16.868231) (1575131202840, 20, 11.565193) (1575131203840, 31, 13.009119) (1575131204840, 29, 18.136400) (1575131205840, 17, 13.806572) (1575131206840, 23, 14.688898) (1575131207840, 26, 12.931019) (1575131208840, 32, 12.185531) (1575131209840, 30, 13.608714) (1575131210840, 23, 18.624914) (1575131211840, 22, 12.970826) (1575131212840, 22, 12.065827) (1575131213840, 25, 16.967192) (1575131214840, 16, 10.283031) (1575131215840, 22, 16.072535) (1575131216840, 24, 10.794536) (1575131217840, 32, 10.591207) (1575131218840, 20, 13.015227) (1575131219840, 28, 15.410999) (1575131220840, 29, 12.785076) (1575131221840, 28, 15.305857) (1575131222840, 33, 12.820810) (1575131223840, 34, 13.618055) (1575131224840, 32, 12.971123) (1575131225840, 24, 10.974546) (1575131226840, 15, 10.742910) (1575131227840, 23, 16.810783) (1575131228840, 18, 13.115224) (1575131229840, 26, 17.418489) (1575131230840, 20, 17.302315) (1575131231840, 21, 14.283571) (1575131232840, 16, 16.826534) (1575131233840, 18, 19.222122) (1575131234840, 18, 14.931420) (1575131235840, 17, 19.549454) (1575131236840, 22, 16.908388) (1575131237840, 32, 15.637796) (1575131238840, 31, 15.517650) (1575131239840, 18, 14.038033) (1575131240840, 32, 19.859648) (1575131241840, 16, 13.220840) (1575131242840, 28, 16.445398) (1575131243840, 26, 16.695753) (1575131244840, 33, 13.696928) (1575131245840, 21, 15.352819) (1575131246840, 15, 12.388407) (1575131247840, 27, 11.267529) (1575131248840, 20, 14.103228) (1575131249840, 20, 16.250950) (1575131250840, 30, 16.236088) (1575131251840, 22, 18.305340) (1575131252840, 25, 17.360685) (1575131253840, 25, 14.978681) (1575131254840, 33, 14.096183) (1575131255840, 26, 10.019039) (1575131256840, 19, 19.158213) (1575131257840, 22, 15.593924) (1575131258840, 26, 18.780119) (1575131259840, 21, 16.001656) (1575131260840, 16, 18.458328) (1575131261840, 21, 16.417843) (1575131262840, 28, 11.736558) (1575131263840, 34, 18.143946) (1575131264840, 27, 10.303225) (1575131265840, 20, 19.756748) (1575131266840, 22, 12.940063) (1575131267840, 23, 11.509640) (1575131268840, 19, 18.319309) (1575131269840, 19, 16.278345) (1575131270840, 27, 10.898361) (1575131271840, 31, 13.922162) (1575131272840, 15, 19.296116) (1575131273840, 26, 15.885763) (1575131274840, 15, 15.525804) (1575131275840, 19, 19.579538) (1575131276840, 20, 11.073811) (1575131277840, 16, 13.932510) (1575131278840, 17, 11.900328) (1575131279840, 22, 16.540415) (1575131280840, 33, 15.203803) (1575131281840, 17, 11.518434) (1575131282840, 17, 13.152081) (1575131283840, 18, 11.378041) (1575131284840, 21, 
15.390745) (1575131285840, 30, 15.127818) (1575131286840, 19, 16.530401) (1575131287840, 32, 16.542702) (1575131288840, 26, 16.366442) (1575131289840, 25, 10.306822) (1575131290840, 15, 13.691117) (1575131291840, 15, 13.476817) (1575131292840, 25, 12.529998) (1575131293840, 22, 15.550021) (1575131294840, 20, 15.064971) (1575131295840, 24, 13.313683) (1575131296840, 23, 17.002878) (1575131297840, 30, 19.991594) (1575131298840, 15, 11.116746) (1575131299830, 16, 19.405090) (1575131300830, 22, 14.377142) (1575131301830, 16, 16.868231) (1575131302830, 20, 11.565193) (1575131303830, 31, 13.009119) (1575131304830, 29, 18.136400) (1575131305830, 17, 13.806572) (1575131306830, 23, 14.688898) (1575131307830, 26, 12.931019) (1575131308830, 32, 12.185531) (1575131309830, 30, 13.608714) (1575131310830, 23, 18.624914) (1575131311830, 22, 12.970826) (1575131312830, 22, 12.065827) (1575131313830, 25, 16.967192) (1575131314830, 16, 10.283031) (1575131315830, 22, 16.072535) (1575131316830, 24, 10.794536) (1575131317830, 32, 10.591207) (1575131318830, 20, 13.015227) (1575131319830, 28, 15.410999) (1575131320830, 29, 12.785076) (1575131321830, 28, 15.305857) (1575131322830, 33, 12.820810) (1575131323830, 34, 13.618055) (1575131324830, 32, 12.971123) (1575131325830, 24, 10.974546) (1575131326830, 15, 10.742910) (1575131327830, 23, 16.810783) (1575131328830, 18, 13.115224) (1575131329830, 26, 17.418489) (1575131330830, 20, 17.302315) (1575131331830, 21, 14.283571) (1575131332830, 16, 16.826534) (1575131333830, 18, 19.222122) (1575131334830, 18, 14.931420) (1575131335830, 17, 19.549454) (1575131336830, 22, 16.908388) (1575131337830, 32, 15.637796) (1575131338830, 31, 15.517650) (1575131339830, 18, 14.038033) (1575131340830, 32, 19.859648) (1575131341830, 16, 13.220840) (1575131342830, 28, 16.445398) (1575131343830, 26, 16.695753) (1575131344830, 33, 13.696928) (1575131345830, 21, 15.352819) (1575131346830, 15, 12.388407) (1575131347830, 27, 11.267529) (1575131348830, 20, 14.103228) (1575131349830, 20, 16.250950) (1575131350830, 30, 16.236088) (1575131351830, 22, 18.305340) (1575131352830, 25, 17.360685) (1575131353830, 25, 14.978681) (1575131354830, 33, 14.096183) (1575131355830, 26, 10.019039) (1575131356830, 19, 19.158213) (1575131357830, 22, 15.593924) (1575131358830, 26, 18.780119) (1575131359830, 21, 16.001656) (1575131360830, 16, 18.458328) (1575131361830, 21, 16.417843) (1575131362830, 28, 11.736558) (1575131363830, 34, 18.143946) (1575131364830, 27, 10.303225) (1575131365830, 20, 19.756748) (1575131366830, 22, 12.940063) (1575131367830, 23, 11.509640) (1575131368830, 19, 18.319309) (1575131369830, 19, 16.278345) (1575131370830, 27, 10.898361) (1575131371830, 31, 13.922162) (1575131372830, 15, 19.296116) (1575131373830, 26, 15.885763) (1575131374830, 15, 15.525804) (1575131375830, 19, 19.579538) (1575131376830, 20, 11.073811) (1575131377830, 16, 13.932510) (1575131378830, 17, 11.900328) (1575131379830, 22, 16.540415) (1575131380830, 33, 15.203803) (1575131381830, 17, 11.518434) (1575131382830, 17, 13.152081) (1575131383830, 18, 11.378041) (1575131384830, 21, 15.390745) (1575131385830, 30, 15.127818) (1575131386830, 19, 16.530401) (1575131387830, 32, 16.542702) (1575131388830, 26, 16.366442) (1575131389830, 25, 10.306822) (1575131390830, 15, 13.691117) (1575131391830, 15, 13.476817) (1575131392830, 25, 12.529998) (1575131393830, 22, 15.550021) (1575131394830, 20, 15.064971) (1575131395830, 24, 13.313683) (1575131396830, 23, 17.002878) (1575131397830, 30, 19.991594) (1575131398830, 15, 11.116746) 
(1575131399820, 16, 19.405090) (1575131400820, 22, 14.377142) (1575131401820, 16, 16.868231) (1575131402820, 20, 11.565193) (1575131403820, 31, 13.009119) (1575131404820, 29, 18.136400) (1575131405820, 17, 13.806572) (1575131406820, 23, 14.688898) (1575131407820, 26, 12.931019) (1575131408820, 32, 12.185531) (1575131409820, 30, 13.608714) (1575131410820, 23, 18.624914) (1575131411820, 22, 12.970826) (1575131412820, 22, 12.065827) (1575131413820, 25, 16.967192) (1575131414820, 16, 10.283031) (1575131415820, 22, 16.072535) (1575131416820, 24, 10.794536) (1575131417820, 32, 10.591207) (1575131418820, 20, 13.015227) (1575131419820, 28, 15.410999) (1575131420820, 29, 12.785076) (1575131421820, 28, 15.305857) (1575131422820, 33, 12.820810) (1575131423820, 34, 13.618055) (1575131424820, 32, 12.971123) (1575131425820, 24, 10.974546) (1575131426820, 15, 10.742910) (1575131427820, 23, 16.810783) (1575131428820, 18, 13.115224) (1575131429820, 26, 17.418489) (1575131430820, 20, 17.302315) (1575131431820, 21, 14.283571) (1575131432820, 16, 16.826534) (1575131433820, 18, 19.222122) (1575131434820, 18, 14.931420) (1575131435820, 17, 19.549454) (1575131436820, 22, 16.908388) (1575131437820, 32, 15.637796) (1575131438820, 31, 15.517650) (1575131439820, 18, 14.038033) (1575131440820, 32, 19.859648) (1575131441820, 16, 13.220840) (1575131442820, 28, 16.445398) (1575131443820, 26, 16.695753) (1575131444820, 33, 13.696928) (1575131445820, 21, 15.352819) (1575131446820, 15, 12.388407) (1575131447820, 27, 11.267529) (1575131448820, 20, 14.103228) (1575131449820, 20, 16.250950) (1575131450820, 30, 16.236088) (1575131451820, 22, 18.305340) (1575131452820, 25, 17.360685) (1575131453820, 25, 14.978681) (1575131454820, 33, 14.096183) (1575131455820, 26, 10.019039) (1575131456820, 19, 19.158213) (1575131457820, 22, 15.593924) (1575131458820, 26, 18.780119) (1575131459820, 21, 16.001656) (1575131460820, 16, 18.458328) (1575131461820, 21, 16.417843) (1575131462820, 28, 11.736558) (1575131463820, 34, 18.143946) (1575131464820, 27, 10.303225) (1575131465820, 20, 19.756748) (1575131466820, 22, 12.940063) (1575131467820, 23, 11.509640) (1575131468820, 19, 18.319309) (1575131469820, 19, 16.278345) (1575131470820, 27, 10.898361) (1575131471820, 31, 13.922162) (1575131472820, 15, 19.296116) (1575131473820, 26, 15.885763) (1575131474820, 15, 15.525804) (1575131475820, 19, 19.579538) (1575131476820, 20, 11.073811) (1575131477820, 16, 13.932510) (1575131478820, 17, 11.900328) (1575131479820, 22, 16.540415) (1575131480820, 33, 15.203803) (1575131481820, 17, 11.518434) (1575131482820, 17, 13.152081) (1575131483820, 18, 11.378041) (1575131484820, 21, 15.390745) (1575131485820, 30, 15.127818) (1575131486820, 19, 16.530401) (1575131487820, 32, 16.542702) (1575131488820, 26, 16.366442) (1575131489820, 25, 10.306822) (1575131490820, 15, 13.691117) (1575131491820, 15, 13.476817) (1575131492820, 25, 12.529998) (1575131493820, 22, 15.550021) (1575131494820, 20, 15.064971) (1575131495820, 24, 13.313683) (1575131496820, 23, 17.002878) (1575131497820, 30, 19.991594) (1575131498820, 15, 11.116746) (1575131499810, 16, 19.405090) (1575131500810, 22, 14.377142) (1575131501810, 16, 16.868231) (1575131502810, 20, 11.565193) (1575131503810, 31, 13.009119) (1575131504810, 29, 18.136400) (1575131505810, 17, 13.806572) (1575131506810, 23, 14.688898) (1575131507810, 26, 12.931019) (1575131508810, 32, 12.185531) (1575131509810, 30, 13.608714) (1575131510810, 23, 18.624914) (1575131511810, 22, 12.970826) (1575131512810, 22, 12.065827) (1575131513810, 25, 
16.967192) (1575131514810, 16, 10.283031) (1575131515810, 22, 16.072535) (1575131516810, 24, 10.794536) (1575131517810, 32, 10.591207) (1575131518810, 20, 13.015227) (1575131519810, 28, 15.410999) (1575131520810, 29, 12.785076) (1575131521810, 28, 15.305857) (1575131522810, 33, 12.820810) (1575131523810, 34, 13.618055) (1575131524810, 32, 12.971123) (1575131525810, 24, 10.974546) (1575131526810, 15, 10.742910) (1575131527810, 23, 16.810783) (1575131528810, 18, 13.115224) (1575131529810, 26, 17.418489) (1575131530810, 20, 17.302315) (1575131531810, 21, 14.283571) (1575131532810, 16, 16.826534) (1575131533810, 18, 19.222122) (1575131534810, 18, 14.931420) (1575131535810, 17, 19.549454) (1575131536810, 22, 16.908388) (1575131537810, 32, 15.637796) (1575131538810, 31, 15.517650) (1575131539810, 18, 14.038033) (1575131540810, 32, 19.859648) (1575131541810, 16, 13.220840) (1575131542810, 28, 16.445398) (1575131543810, 26, 16.695753) (1575131544810, 33, 13.696928) (1575131545810, 21, 15.352819) (1575131546810, 15, 12.388407) (1575131547810, 27, 11.267529) (1575131548810, 20, 14.103228) (1575131549810, 20, 16.250950) (1575131550810, 30, 16.236088) (1575131551810, 22, 18.305340) (1575131552810, 25, 17.360685) (1575131553810, 25, 14.978681) (1575131554810, 33, 14.096183) (1575131555810, 26, 10.019039) (1575131556810, 19, 19.158213) (1575131557810, 22, 15.593924) (1575131558810, 26, 18.780119) (1575131559810, 21, 16.001656) (1575131560810, 16, 18.458328) (1575131561810, 21, 16.417843) (1575131562810, 28, 11.736558) (1575131563810, 34, 18.143946) (1575131564810, 27, 10.303225) (1575131565810, 20, 19.756748) (1575131566810, 22, 12.940063) (1575131567810, 23, 11.509640) (1575131568810, 19, 18.319309) (1575131569810, 19, 16.278345) (1575131570810, 27, 10.898361) (1575131571810, 31, 13.922162) (1575131572810, 15, 19.296116) (1575131573810, 26, 15.885763) (1575131574810, 15, 15.525804) (1575131575810, 19, 19.579538) (1575131576810, 20, 11.073811) (1575131577810, 16, 13.932510) (1575131578810, 17, 11.900328) (1575131579810, 22, 16.540415) (1575131580810, 33, 15.203803) (1575131581810, 17, 11.518434) (1575131582810, 17, 13.152081) (1575131583810, 18, 11.378041) (1575131584810, 21, 15.390745) (1575131585810, 30, 15.127818) (1575131586810, 19, 16.530401) (1575131587810, 32, 16.542702) (1575131588810, 26, 16.366442) (1575131589810, 25, 10.306822) (1575131590810, 15, 13.691117) (1575131591810, 15, 13.476817) (1575131592810, 25, 12.529998) (1575131593810, 22, 15.550021) (1575131594810, 20, 15.064971) (1575131595810, 24, 13.313683) (1575131596810, 23, 17.002878) (1575131597810, 30, 19.991594) (1575131598810, 15, 11.116746) (1575131599800, 16, 19.405090) (1575131600800, 22, 14.377142) (1575131601800, 16, 16.868231) (1575131602800, 20, 11.565193) (1575131603800, 31, 13.009119) (1575131604800, 29, 18.136400) (1575131605800, 17, 13.806572) (1575131606800, 23, 14.688898) (1575131607800, 26, 12.931019) (1575131608800, 32, 12.185531) (1575131609800, 30, 13.608714) (1575131610800, 23, 18.624914) (1575131611800, 22, 12.970826) (1575131612800, 22, 12.065827) (1575131613800, 25, 16.967192) (1575131614800, 16, 10.283031) (1575131615800, 22, 16.072535) (1575131616800, 24, 10.794536) (1575131617800, 32, 10.591207) (1575131618800, 20, 13.015227) (1575131619800, 28, 15.410999) (1575131620800, 29, 12.785076) (1575131621800, 28, 15.305857) (1575131622800, 33, 12.820810) (1575131623800, 34, 13.618055) (1575131624800, 32, 12.971123) (1575131625800, 24, 10.974546) (1575131626800, 15, 10.742910) (1575131627800, 23, 16.810783) 
(1575131628800, 18, 13.115224) (1575131629800, 26, 17.418489) (1575131630800, 20, 17.302315) (1575131631800, 21, 14.283571) (1575131632800, 16, 16.826534) (1575131633800, 18, 19.222122) (1575131634800, 18, 14.931420) (1575131635800, 17, 19.549454) (1575131636800, 22, 16.908388) (1575131637800, 32, 15.637796) (1575131638800, 31, 15.517650) (1575131639800, 18, 14.038033) (1575131640800, 32, 19.859648) (1575131641800, 16, 13.220840) (1575131642800, 28, 16.445398) (1575131643800, 26, 16.695753) (1575131644800, 33, 13.696928) (1575131645800, 21, 15.352819) (1575131646800, 15, 12.388407) (1575131647800, 27, 11.267529) (1575131648800, 20, 14.103228) (1575131649800, 20, 16.250950) (1575131650800, 30, 16.236088) (1575131651800, 22, 18.305340) (1575131652800, 25, 17.360685) (1575131653800, 25, 14.978681) (1575131654800, 33, 14.096183) (1575131655800, 26, 10.019039) (1575131656800, 19, 19.158213) (1575131657800, 22, 15.593924) (1575131658800, 26, 18.780119) (1575131659800, 21, 16.001656) (1575131660800, 16, 18.458328) (1575131661800, 21, 16.417843) (1575131662800, 28, 11.736558) (1575131663800, 34, 18.143946) (1575131664800, 27, 10.303225) (1575131665800, 20, 19.756748) (1575131666800, 22, 12.940063) (1575131667800, 23, 11.509640) (1575131668800, 19, 18.319309) (1575131669800, 19, 16.278345) (1575131670800, 27, 10.898361) (1575131671800, 31, 13.922162) (1575131672800, 15, 19.296116) (1575131673800, 26, 15.885763) (1575131674800, 15, 15.525804) (1575131675800, 19, 19.579538) (1575131676800, 20, 11.073811) (1575131677800, 16, 13.932510) (1575131678800, 17, 11.900328) (1575131679800, 22, 16.540415) (1575131680800, 33, 15.203803) (1575131681800, 17, 11.518434) (1575131682800, 17, 13.152081) (1575131683800, 18, 11.378041) (1575131684800, 21, 15.390745) (1575131685800, 30, 15.127818) (1575131686800, 19, 16.530401) (1575131687800, 32, 16.542702) (1575131688800, 26, 16.366442) (1575131689800, 25, 10.306822) (1575131690800, 15, 13.691117) (1575131691800, 15, 13.476817) (1575131692800, 25, 12.529998) (1575131693800, 22, 15.550021)") + + end_time = int(round(time.time() * 1000)) + tdLog.info("Execute time without compress: %dms" % (end_time - start_time)) + + simLogPath = tdDnodes.getSimLogPath() + grepCmd = "grep -a 'compress rpc msg, before:' -r %s | head -2" % simLogPath + output = subprocess.check_output(grepCmd, shell=True).decode("utf-8") + + if output != "": + tdLog.info("Find %s in log file." % output) + tdLog.exit("%s successfully executed! Compress works but NOT expected." % __file__) + else: + tdLog.success("%s failed! Compress does NOT works as expected." % __file__) + + conn.close() diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py new file mode 100644 index 0000000000..db3a5fea93 --- /dev/null +++ b/tests/pytest/util/dnodes-random-fail.py @@ -0,0 +1,500 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
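Editorial note on the check above: after the un-compressed pass finishes, the test greps the sim log for the rpc-compression trace and fails the run if it appears, since this pass runs with compression disabled. A minimal sketch of that assertion using only the standard library; the marker string and log-path idea come from the test itself, while the helper name is hypothetical:

import subprocess

def assert_no_rpc_compression(sim_log_path):
    # grep exits non-zero when nothing matches, which is the expected outcome here
    cmd = "grep -a 'compress rpc msg, before:' -r %s" % sim_log_path
    try:
        output = subprocess.check_output(cmd, shell=True).decode("utf-8")
    except subprocess.CalledProcessError:
        return  # no compression trace found: compression is off, as expected
    raise AssertionError("rpc compression trace found:\n%s" % output)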
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from util.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "anyIp": "0", + "sdbDebugFlag": "135", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + } + + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = "%s/sim/psim/log" % (self.path) + return self.logDir + + def getCfgDir(self): + self.cfgDir = "%s/sim/psim/cfg" % (self.path) + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = "%s/sim/psim/log" % (self.path) + self.cfgDir = "%s/sim/psim/cfg" % (self.path) + self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) + self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) + self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index) + self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % ( + self.path, self.index) + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = 
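Editorial note: TDSimClient.deploy above recreates the log and cfg directories and then appends every cfgDict entry to taos.cfg as an "option value" line via echo. A minimal sketch of the same pattern with plain file I/O instead of os.system, assuming nothing beyond the standard library; the paths are illustrative:

import os

def write_cfg(cfg_path, cfg_dict):
    # recreate the cfg directory and append one "option value" line per entry
    os.makedirs(os.path.dirname(cfg_path), exist_ok=True)
    with open(cfg_path, "a") as f:
        for option, value in cfg_dict.items():
            f.write("%s %s\n" % (option, value))

write_cfg("/tmp/sim/psim/cfg/taos.cfg", {"rpcDebugFlag": "135", "asyncLog": "0"})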
"touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfg("dataDir", self.dataDir) + self.cfg("logDir", self.logDir) + self.cfg("numOfLogLines", "100000000") + self.cfg("mnodeEqualVnodeNum", "0") + self.cfg("walLevel", "1") + self.cfg("statusInterval", "1") + self.cfg("numOfTotalVnodes", "64") + self.cfg("numOfMnodes", "3") + self.cfg("numOfThreadsPerCore", "2.0") + self.cfg("monitor", "0") + self.cfg("maxVnodeConnections", "30000") + self.cfg("maxMgmtConnections", "30000") + self.cfg("maxMeterConnections", "30000") + self.cfg("maxShellConns", "30000") + self.cfg("locale", "en_US.UTF-8") + self.cfg("charset", "UTF-8") + self.cfg("asyncLog", "0") + self.cfg("anyIp", "0") + self.cfg("dDebugFlag", "135") + self.cfg("mDebugFlag", "135") + self.cfg("sdbDebugFlag", "135") + self.cfg("rpcDebugFlag", "135") + self.cfg("tmrDebugFlag", "131") + self.cfg("cDebugFlag", "135") + self.cfg("httpDebugFlag", "135") + self.cfg("monitorDebugFlag", "135") + self.cfg("udebugFlag", "135") + self.cfg("jnidebugFlag", "135") + self.cfg("qdebugFlag", "135") + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s --random-file-fail-factor 5 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim/psim" % (self.path) + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # cmd = "sudo cp 
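Editorial note: stop() and forcestop() above share one pattern: resolve the pid via a ps/grep/awk pipeline, signal it in a loop until the process list is empty, then free the TDengine service ports with fuser. A hedged sketch of that loop; the process name and port range follow the framework, the helper itself is illustrative:

import os
import subprocess
import time

def kill_by_name(name, sig="INT", ports=range(6030, 6041)):
    ps_cmd = "ps -ef | grep -w %s | grep -v grep | awk '{print $2}'" % name
    pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8")
    while pids:
        for pid in pids.split():
            os.system("kill -%s %s > /dev/null 2>&1" % (sig, pid))
        time.sleep(1)
        pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8")
    for port in ports:
        os.system("fuser -k -n tcp %d" % port)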
%s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 370af1ba13..226682ff92 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -22,35 +22,59 @@ class TDSimClient: def __init__(self): self.testCluster = False + self.cfgDict = { + 
"numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "anyIp": "0", + "sdbDebugFlag": "135", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + } + def init(self, path): self.__init__() self.path = path + def getLogDir(self): + self.logDir = "%s/sim/psim/log" % (self.path) + return self.logDir + def getCfgDir(self): + self.cfgDir = "%s/sim/psim/cfg" % (self.path) return self.cfgDir def setTestCluster(self, value): self.testCluster = value + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + def cfg(self, option, value): cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) def deploy(self): - self.logDir = "%s/sim/psim/log" % (self.path,) + self.logDir = "%s/sim/psim/log" % (self.path) self.cfgDir = "%s/sim/psim/cfg" % (self.path) self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path) cmd = "rm -rf " + self.logDir if os.system(cmd) != 0: tdLog.exit(cmd) - - cmd = "rm -rf " + self.cfgDir + + cmd = "mkdir -p " + self.logDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir + cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) @@ -66,19 +90,10 @@ class TDSimClient: self.cfg("masterIp", "192.168.0.1") self.cfg("secondIp", "192.168.0.2") self.cfg("logDir", self.logDir) - self.cfg("numOfLogLines", "100000000") - self.cfg("numOfThreadsPerCore", "2.0") - self.cfg("locale", "en_US.UTF-8") - self.cfg("charset", "UTF-8") - self.cfg("asyncLog", "0") - self.cfg("anyIp", "0") - self.cfg("sdbDebugFlag", "135") - self.cfg("rpcDebugFlag", "135") - self.cfg("tmrDebugFlag", "131") - self.cfg("cDebugFlag", "135") - self.cfg("udebugFlag", "135") - self.cfg("jnidebugFlag", "135") - self.cfg("qdebugFlag", "135") + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) @@ -190,32 +205,31 @@ class TDDnode: "dnode:%d is deployed and configured by %s" % (self.index, self.cfgPath)) - def start(self): + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) - binPath = "" if ("community" in selfPath): - projPath = selfPath + "/../../../../" - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - binPath = os.path.join(root, "taosd") - break + projPath = selfPath[:selfPath.find("community")] else: - projPath = selfPath + "/../../../" - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - binPath = os.path.join(root, "taosd") - break + projPath = selfPath[:selfPath.find("tests")] - if (binPath == ""): + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): tdLog.exit("taosd not found!") else: - tdLog.info("taosd found in %s" % rootRealPath) + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" if self.deployed == 0: tdLog.exit("dnode:%d is not deployed" % (self.index)) @@ -379,6 +393,9 @@ class TDDnodes: 
for i in range(len(self.dnodes)): self.dnodes[i].init(self.path) + self.sim = TDSimClient() + self.sim.init(self.path) + def setTestCluster(self, value): self.testCluster = value @@ -386,8 +403,6 @@ class TDDnodes: self.valgrind = value def deploy(self, index): - self.sim = TDSimClient() - self.sim.init(self.path) self.sim.setTestCluster(self.testCluster) if (self.simDeployed == False): @@ -475,5 +490,11 @@ class TDDnodes: def getSimCfgPath(self): return self.sim.getCfgDir() + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + tdDnodes = TDDnodes() diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index dc0366b214..ec39ab61b9 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -29,10 +29,8 @@ class TDSql: self.cursor = cursor if (log): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - self.cursor.log(callerFilename + ".sql") + caller = inspect.getframeinfo(inspect.stack()[1][0]) + self.cursor.log(caller.filename + ".sql") def close(self): self.cursor.close() @@ -55,12 +53,8 @@ class TDSql: except BaseException: expectErrNotOccured = False if expectErrNotOccured: - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - tdLog.exit( - "%s failed: sql:%s, expect error not occured" % - (callerFilename, sql)) + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) else: self.queryRows = 0 self.queryCols = 0 @@ -69,75 +63,70 @@ class TDSql: def query(self, sql): self.sql = sql - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self.cursor.description) - # if self.queryRows == 1 and self.queryCols == 1: - # tdLog.info("sql:%s, rows:%d cols:%d data:%s" % (self.sql, self.queryRows, self.queryCols, self.queryResult[0][0])) - # else: - # tdLog.info("sql:%s, rows:%d cols:%d" % (self.sql, self.queryRows, self.queryCols)) + try: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) return self.queryRows + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) + self.sql = sql + try: + for i in range(timeout): + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return (self.queryRows, timeout) + def checkRows(self, expectRows): - if self.queryRows != expectRows: - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - tdLog.exit( - "%s failed: sql:%s, queryRows:%d != expect:%d" % - (callerFilename, self.sql, 
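Editorial note: waitedQuery above is the new polling variant of query(): it re-executes the statement once per second until at least expectRows rows come back or the timeout elapses, letting tests wait for asynchronous results instead of sleeping a fixed time. A standalone sketch of the same loop against any DB-API style cursor; the names are illustrative:

import time

def waited_query(cursor, sql, expect_rows, timeout):
    rows = []
    for i in range(timeout):
        cursor.execute(sql)
        rows = cursor.fetchall()
        if len(rows) >= expect_rows:
            return rows, i          # enough rows after i retries
        time.sleep(1)
    return rows, timeout            # timed out with whatever came back last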
self.queryRows, expectRows)) - tdLog.info("sql:%s, queryRows:%d == expect:%d" % - (self.sql, self.queryRows, expectRows)) + if self.queryRows == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows) + tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + + def checkRowCol(self, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, self.sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, self.sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > self.queryRows: + args = (caller.filename, caller.lineno, self.sql, row, self.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > self.queryCols: + args = (caller.filename, caller.lineno, self.sql, col, self.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) def checkDataType(self, row, col, dataType): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - - if row < 0: - tdLog.exit( - "%s failed: sql:%s, row:%d is smaller than zero" % - (callerFilename, self.sql, row)) - if col < 0: - tdLog.exit( - "%s failed: sql:%s, col:%d is smaller than zero" % - (callerFilename, self.sql, col)) - if row > self.queryRows: - tdLog.exit( - "%s failed: sql:%s, row:%d is larger than queryRows:%d" % - (callerFilename, self.sql, row, self.queryRows)) - if col > self.queryCols: - tdLog.exit( - "%s failed: sql:%s, col:%d is larger than queryCols:%d" % - (callerFilename, self.sql, col, self.queryCols)) - + self.checkRowCol(row, col) return self.cursor.istype(col, dataType) def checkData(self, row, col, data): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - - if row < 0: - tdLog.exit( - "%s failed: sql:%s, row:%d is smaller than zero" % - (callerFilename, self.sql, row)) - if col < 0: - tdLog.exit( - "%s failed: sql:%s, col:%d is smaller than zero" % - (callerFilename, self.sql, col)) - if row > self.queryRows: - tdLog.exit( - "%s failed: sql:%s, row:%d is larger than queryRows:%d" % - (callerFilename, self.sql, row, self.queryRows)) - if col > self.queryCols: - tdLog.exit( - "%s failed: sql:%s, col:%d is larger than queryCols:%d" % - (callerFilename, self.sql, col, self.queryCols)) + self.checkRowCol(row, col) if self.queryResult[row][col] != data: - tdLog.exit("%s failed: sql:%s row:%d col:%d data:%s != expect:%s" % ( - callerFilename, self.sql, row, col, self.queryResult[row][col], data)) + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) if data is None: tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % @@ -153,26 +142,7 @@ class TDSql: (self.sql, row, col, self.queryResult[row][col], data)) def getData(self, row, col): - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ - - if row < 0: - tdLog.exit( - "%s failed: sql:%s, row:%d is smaller than zero" % - (callerFilename, self.sql, row)) - if col < 0: - 
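Editorial note: the refactored check helpers above replace the old inspect.getmodule lookup with inspect.getframeinfo on the caller's frame, so failures report the test file and line number instead of just the module path. A minimal sketch of that reporting, assuming the helper is called directly from the test case:

import inspect

def fail(msg):
    # one frame up is the test that invoked this helper
    caller = inspect.getframeinfo(inspect.stack()[1][0])
    raise AssertionError("%s(%d) failed: %s" % (caller.filename, caller.lineno, msg))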
tdLog.exit( - "%s failed: sql:%s, col:%d is smaller than zero" % - (callerFilename, self.sql, col)) - if row > self.queryRows: - tdLog.exit( - "%s failed: sql:%s, row:%d is larger than queryRows:%d" % - (callerFilename, self.sql, row, self.queryRows)) - if col > self.queryCols: - tdLog.exit( - "%s failed: sql:%s, col:%d is larger than queryCols:%d" % - (callerFilename, self.sql, col, self.queryCols)) + self.checkRowCol(row, col) return self.queryResult[row][col] def executeTimes(self, sql, times): @@ -185,20 +155,22 @@ class TDSql: def execute(self, sql): self.sql = sql - self.affectedRows = self.cursor.execute(sql) + try: + self.affectedRows = self.cursor.execute(sql) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) return self.affectedRows def checkAffectedRows(self, expectAffectedRows): if self.affectedRows != expectAffectedRows: - frame = inspect.stack()[1] - callerModule = inspect.getmodule(frame[0]) - callerFilename = callerModule.__file__ + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows) + tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args) - tdLog.exit( - "%s failed: sql:%s, affectedRows:%d != expect:%d" % - (callerFilename, self.sql, self.affectedRows, expectAffectedRows)) - tdLog.info("sql:%s, affectedRows:%d == expect:%d" % - (self.sql, self.affectedRows, expectAffectedRows)) + tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows)) tdSql = TDSql() diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/general/alter/cached_schema_after_alter.sim index 2d049ec595..bf9b9eb6a3 100644 --- a/tests/script/general/alter/cached_schema_after_alter.sim +++ b/tests/script/general/alter/cached_schema_after_alter.sim @@ -68,7 +68,7 @@ endi if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != NULL then return -1 endi @@ -80,7 +80,7 @@ endi if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != NULL then return -1 endi diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index 4dfee222cc..488f807fbc 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -843,6 +843,14 @@ if $data81 != 4 then return -1 endi +# desc fill query +print desc fill query +sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10) order by ts desc; +if $rows != 12 then + return -1 +endi + + print =============== clear sql drop database $db sql show databases diff --git a/tests/script/general/parser/fill_us.sim b/tests/script/general/parser/fill_us.sim new file mode 100644 index 0000000000..a66629c90b --- /dev/null +++ b/tests/script/general/parser/fill_us.sim @@ -0,0 +1,1037 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +$dbPrefix = m_fl_db +$tbPrefix = m_fl_tb +$mtPrefix = m_fl_mt +$tbNum = 10 +$rowNum = 5 +$totalNum = $tbNum * $rowNum +$ts0 = 1537146000000000 # 2018-09-17 09:00:00.000000 +$delta = 600000000 +print ========== fill_us.sim +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . 
$i + +sql drop database $db -x step1 +step1: +sql create database $db precision 'us' +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 bool, c7 binary(10), c8 nchar(10)) tags(tgcol int) + +$i = 0 +$ts = $ts0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $xs = $x * $delta + $ts = $ts0 + $xs + sql insert into $tb values ( $ts , $x , $x , $x , $x , $x , true, 'BINARY', 'NCHAR' ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +# setup +$i = 0 +$tb = $tbPrefix . $i +$tsu = 4 * $delta +$tsu = $tsu + $ts0 + +## fill syntax test +# number of fill values exceeds number of selected columns +print select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $data11 != 6 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 6.00000 then + return -1 +endi +if $data14 != 6.000000000 then + return -1 +endi + +# number of fill values is smaller than number of selected columns +print sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) +sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) +if $data11 != 6 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 6.00000 then + return -1 +endi + +# unspecified filling method +sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6) + +## constant fill test +# count_with_fill +print constant_fill test +print count_with_constant_fill +print sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 1 then + return -1 +endi + +# avg_with_fill +print avg_witt_constant_fill +sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data11 != 6.000000000 then + return -1 +endi +if $data21 != 1.000000000 then + return -1 +endi +if $data31 != 6.000000000 then + return -1 +endi +if $data41 != 2.000000000 then + return -1 +endi +if $data51 != 6.000000000 then + return -1 +endi +if $data61 != 3.000000000 then + return -1 +endi +if $data71 != 6.000000000 then + return -1 +endi +if $data81 != 4.000000000 then + return -1 +endi + +# max_with_fill +print max_with_fill +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if 
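Editorial note: each table above holds 5 rows spaced 10 minutes apart (delta is 600000000 microseconds), and the queries cover [ts0, ts0 + 4*delta] with a 5-minute interval, so every other window is empty and receives the constant fill value; that is where the recurring 9-row checks come from. A small sketch of the arithmetic, using the values from the test:

ts0 = 1537146000000000        # 2018-09-17 09:00:00.000000, in microseconds
delta = 600000000             # 10 minutes between inserted rows
interval = 300000000          # the 5-minute interval(5m) window
tsu = ts0 + 4 * delta         # upper bound used in the WHERE clause
windows = (tsu - ts0) // interval + 1
print(windows)                # -> 9: 5 windows with data, 4 filled with the constant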
$data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# min_with_fill +print min_with_fill +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# first_with_fill +print first_with_fill +sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# check double type values +if $data04 != 0.000000000 then + return -1 +endi +print data14 = $data14 +if $data14 != 6.000000000 then + return -1 +endi +if $data24 != 1.000000000 then + return -1 +endi +if $data34 != 6.000000000 then + return -1 +endi +if $data44 != 2.000000000 then + return -1 +endi +if $data54 != 6.000000000 then + return -1 +endi +if $data64 != 3.000000000 then + return -1 +endi + +# check float type values +print $data03 $data13 +if $data03 != 0.00000 then + return -1 +endi +if $data13 != 6.00000 then + return -1 +endi +if $data23 != 1.00000 then + return -1 +endi +if $data33 != 6.00000 then + return -1 +endi +if $data43 != 2.00000 then + return -1 +endi +if $data53 != 6.00000 then + return -1 +endi +if $data63 != 3.00000 then + return -1 +endi +if $data73 != 6.00000 then + return -1 +endi +if $data83 != 4.00000 then + return -1 +endi + + +# last_with_fill +print last_with_fill +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 6 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 6 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# fill_negative_values +sql select sum(c1), avg(c2), max(c3), min(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1, -1, -1, -1, -1, -1, -1) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != -1 then + return -1 +endi + +# fill_char_values_to_arithmetic_fields +sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= 
$ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') + +# fill_multiple_columns +sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc) +sql select sum(c1), avg(c2), min(c3), max(c4) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99) +if $rows != 9 then + return -1 +endi +print data01 = $data01 +print data11 = $data11 +if $data01 != 0 then + return -1 +endi +if $data11 != 99 then + return -1 +endi + +sql select * from $tb +#print data08 = $data08 +if $data08 != NCHAR then + return -1 +endi +#return -1 + + +# fill_into_nonarithmetic_fieds +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) +#if $data11 != 20000000 then +if $data11 != 1 then + return -1 +endi + +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1) +sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1') +# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24 +# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24 +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1') +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) +sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true') + + +# fill nonarithmetic values into arithmetic fields +sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc); +sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); + +sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 10 then + return -1 +endi + +sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1); +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 10 then + return -1 +endi + +sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10'); +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 10 then + return -1 +endi + + +## linear fill +# feature currently switched off 2018/09/29 +#sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) + +## previous fill +print fill(prev) +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if 
$data41 != 1 then + return -1 +endi +if $data51 != 1 then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if $data71 != 1 then + return -1 +endi +if $data81 != 1 then + return -1 +endi + +# avg_with_fill +sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data21 != 1.000000000 then + return -1 +endi +if $data31 != 1.000000000 then + return -1 +endi +if $data41 != 2.000000000 then + return -1 +endi +if $data51 != 2.000000000 then + return -1 +endi +if $data61 != 3.000000000 then + return -1 +endi +if $data71 != 3.000000000 then + return -1 +endi +if $data81 != 4.000000000 then + return -1 +endi + +# max_with_fill +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# min_with_fill +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# first_with_fill +sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# last_with_fill +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != 0 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != 3 then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +## NULL fill +print fill(value, NULL) +# count_with_fill +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL) +print select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL) +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from 
$tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 1 then + return -1 +endi +sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(none) +if $rows != 5 then + return -1 +endi + +# avg_with_fill +sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1.000000000 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2.000000000 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3.000000000 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4.000000000 then + return -1 +endi + +# max_with_fill +sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# min_with_fill +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# first_with_fill +sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi +if $data81 != 4 then + return -1 +endi + +# last_with_fill +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL) +if $rows != 9 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != 2 then + return -1 +endi +if $data51 != null then + return -1 +endi +if $data61 != 3 then + return -1 +endi +if $data71 != null then + return -1 +endi 
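Editorial note on the contrast being exercised here: fill(value, NULL) keeps all 9 interval windows and reports the empty ones as null, while fill(none) drops empty windows entirely and returns only the 5 that contain data. A small sketch of the expected row counts, reusing the 9-window layout from the constant-fill queries above:

windows = 9                    # all 5-minute windows in [ts0, tsu]
with_data = (windows + 1) // 2 # rows land in every other window
print(windows)                 # fill(value, NULL): 9 rows, nulls in the gaps
print(with_data)               # fill(none): 5 rows, empty windows omitted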
+if $data81 != 4 then + return -1 +endi + +# desc fill query +print desc fill query +sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10) order by ts desc; +if $rows != 12 then + return -1 +endi + + +#print =============== clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + +######################### us ########################## +$start = 1537146000000000 # 2018-09-17 09:00:00.000000 +$delta = 600000000 + +sql create table us_st (ts timestamp, c1 int, c2 double) tags(tgcol int) +sql create table us_t1 using us_st tags( 1 ) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000001', 1 , 1) +sql insert into us_t1 values ('2018-09-17 09:00:00.000002', 2 , 2) +sql insert into us_t1 values ('2018-09-17 09:00:00.000003', 3 , 3) +sql insert into us_t1 values ('2018-09-17 09:00:00.000004', 4 , 4) +sql insert into us_t1 values ('2018-09-17 09:00:00.000005', 5 , 5) +sql insert into us_t1 values ('2018-09-17 09:00:00.000006', 6 , 6) +sql insert into us_t1 values ('2018-09-17 09:00:00.000007', 7 , 7) +sql insert into us_t1 values ('2018-09-17 09:00:00.000008', 8 , 8) +sql insert into us_t1 values ('2018-09-17 09:00:00.000009', 9 , 9) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000015', 15 , 15) +sql insert into us_t1 values ('2018-09-17 09:00:00.000016', 16 , 16) +sql insert into us_t1 values ('2018-09-17 09:00:00.000017', 17 , 17) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000021', 21 , 21) +sql insert into us_t1 values ('2018-09-17 09:00:00.000022', 22 , 22) +sql insert into us_t1 values ('2018-09-17 09:00:00.000023', 23 , 23) + +sql insert into us_t1 values ('2018-09-17 09:00:00.000027', 27 , 27) +sql insert into us_t1 values ('2018-09-17 09:00:00.000028', 28 , 28) +sql insert into us_t1 values ('2018-09-17 09:00:00.000029', 29 , 29) + +print sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(value, 999, 999) +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(value, 999, 999) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 999.000000000 then + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != 999.000000000 then + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(none) +if $rows != 6 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 16.000000000 then + return -1 +endi +if $data51 != 21.000000000 then + return -1 +endi + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(null) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != null then + print 
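Editorial note: with the database created at microsecond precision, interval(3u) buckets the rows into 3-microsecond windows aligned to multiples of 3, which is why the queries over [..000002, ..000021] return 8 windows and the gaps around ..000012 and ..000018 pick up the fill value. A hedged sketch of that bucketing, reproducing the expected output from the timestamps inserted above:

rows_us = [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 16, 17, 21, 22, 23, 27, 28, 29]
lo, hi, step = 2, 21, 3                     # WHERE bounds and interval(3u)
buckets = {}
for ts in rows_us:
    if lo <= ts <= hi:
        buckets.setdefault(ts - ts % step, []).append(ts)
starts = range(lo - lo % step, hi + 1, step)
avgs = [sum(buckets[s]) / len(buckets[s]) if s in buckets else 999 for s in starts]
print(avgs)   # -> [2.0, 4.0, 7.0, 9.0, 999, 16.0, 999, 21.0], 8 windows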
===== $data41 + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != null then + print ===== $data61 + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + + + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(prev) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 9.000000000 then + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != 16.000000000 then + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + +sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(linear) +if $rows != 8 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data11 != 4.000000000 then + return -1 +endi +if $data21 != 7.000000000 then + return -1 +endi +if $data31 != 9.000000000 then + return -1 +endi +if $data41 != 12.500000000 then + return -1 +endi +if $data51 != 16.000000000 then + return -1 +endi +if $data61 != 18.500000000 then + return -1 +endi +if $data71 != 21.000000000 then + return -1 +endi + +print ======== fill_us.sim run end...... ================ \ No newline at end of file diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index 5d785a2fc3..70edf3535b 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -422,4 +422,63 @@ if $data97 != @group_tb0@ then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file + +#=========================== group by multi tags ====================== +sql create table st (ts timestamp, c int) tags (t1 int, t2 int, t3 int, t4 int); +sql create table t1 using st tags(1, 1, 1, 1); +sql create table t2 using st tags(1, 2, 2, 2); +sql insert into t1 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; +sql insert into t1 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; +sql insert into t2 values ('2020-03-27 04:11:16.000', 1)('2020-03-27 04:11:17.000', 2) ('2020-03-27 04:11:18.000', 3) ('2020-03-27 04:11:19.000', 4) ; +sql insert into t2 values ('2020-03-27 04:21:16.000', 1)('2020-03-27 04:31:17.000', 2) ('2020-03-27 04:51:18.000', 3) ('2020-03-27 05:10:19.000', 4) ; + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; +if $rows != 40 then + return -1 +endi + +if $data01 != 1.000000000 then + return -1 +endi +if $data02 != t1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t1 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 1 then + return -1 +endi + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; +if $rows != 2 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t2 then + return -1 +endi +if 
$data13 != 1 then + return -1 +endi +if $data14 != 2 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/import_commit1.sim b/tests/script/general/parser/import_commit1.sim index a929c1846b..197ae58453 100644 --- a/tests/script/general/parser/import_commit1.sim +++ b/tests/script/general/parser/import_commit1.sim @@ -40,7 +40,7 @@ while $x < $rowNum endw print ====== tables created -sleep 60000 +sleep 6000 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit2.sim b/tests/script/general/parser/import_commit2.sim index c93b3168f1..e400d0c3cb 100644 --- a/tests/script/general/parser/import_commit2.sim +++ b/tests/script/general/parser/import_commit2.sim @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 60000 +sleep 6000 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit3.sim b/tests/script/general/parser/import_commit3.sim index 99ece98278..7e7451e689 100644 --- a/tests/script/general/parser/import_commit3.sim +++ b/tests/script/general/parser/import_commit3.sim @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 60000 +sleep 6000 $ts = $ts + 1 sql insert into $tb values ( $ts , -1, -1, -1, -1, -1) @@ -47,7 +47,7 @@ $ts = $ts0 + $delta $ts = $ts + 1 sql import into $tb values ( $ts , -2, -2, -2, -2, -2) -sleep 60000 +sleep 6000 sql show databases diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index 1bce6f1950..07f2cd3f77 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -210,6 +210,11 @@ if $data10 != @70-01-01 08:01:40.200@ then return -1 endi +print data06 = $data06 +print data07 = $data07 +print data08 = $data08 +print data00 = $data00 + if $data07 != 0 then return -1 endi @@ -255,14 +260,17 @@ endi print 3 #agg + where condition sql select count(join_tb1.c3), count(join_tb0.ts) from $tb1 , $tb2 where $ts1 = $ts2 and join_tb1.ts <= 100002 and join_tb0.c7 = true; - -$val = 2 -if $data00 != $val then - print expect 2, actaul: $data00 +if $rows != 1 then return -1 endi -if $data01 != $val then +print $data00 + +if $data00 != 2 then + return -1 +endi + +if $data01 != 2 then return -1 endi @@ -412,7 +420,7 @@ endi #======================limit offset=================================== # tag values not int -sql_error select count(*) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t2=join_mt1.t2; #!!!!! 
+sql_error select count(*) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t2=join_mt1.t2; # tag type not identical sql_error select count(*) from join_mt0, join_mt1 where join_mt1.t2 = join_mt0.t1 and join_mt1.ts=join_mt0.ts; @@ -447,4 +455,15 @@ sql insert into um2 using m2 tags(9) values(1000001, 10)(2000000, 20); sql_error select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts; +#empty table join test, added for the no-result join case +sql create database ux1; +sql use ux1; +sql create table m1(ts timestamp, k int) tags(a binary(12), b int); +sql create table tm0 using m1 tags('abc', 1); +sql create table m2(ts timestamp, k int) tags(a int, b binary(12)); +sql create table tm2 using m2 tags(2, 'abc'); +sql select count(*) from tm0, tm2 where tm0.ts=tm2.ts; +sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a +sql drop database ux1; + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/limit2_query.sim b/tests/script/general/parser/limit2_query.sim index 1dbd0f32bd..8294247a86 100644 --- a/tests/script/general/parser/limit2_query.sim +++ b/tests/script/general/parser/limit2_query.sim @@ -327,3 +327,7 @@ endi if $data98 != 9 then return -1 endi + +#add one more test case +sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089; + diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index 329787b69c..9f944a586b 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -636,6 +636,15 @@ if $data00 != $data01 then return -1 endi +sql select first(ts), ts from select_tags_tb1 +if $row != 1 then + return -1 +endi + +if $data01 != @70-01-01 08:01:50.001@ then + return -1 +endi + print ======= selectivity + tags + group by + tags + filter + interval ================ sql select first(c1), t2, t1, tbname from select_tags_mt0 where c1<=2 interval(1d) group by tbname; if $row != 3 then diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim new file mode 100644 index 0000000000..177c95651f --- /dev/null +++ b/tests/script/general/parser/sliding.sim @@ -0,0 +1,461 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c debugFlag -v 135 +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/exec.sh -n dnode1 -s start +sleep 1000 +sql connect + +$dbPrefix = sliding_db +$tbPrefix = sliding_tb +$mtPrefix = sliding_mt +$tbNum = 8 +$rowNum = 10000 +$totalNum = $tbNum * $rowNum + +print =============== sliding.sim +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +$tstart = 946656000000 + +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database if exists $db -x step1 +step1: +sql create database if not exists $db tables 4 keep 36500 +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + $tg2 = ' . abc + $tg2 = $tg2 . ' + sql create table $tb using $mt tags( $i , $tg2 ) + + $x = 0 + while $x < $rowNum + $ms = $x . m + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + + $binary = ' . binary + $binary = $binary . $c + $binary = $binary . ' + + $nchar = ' . nchar + $nchar = $nchar . 
$c + $nchar = $nchar . ' + + sql insert into $tb values ($tstart , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $tstart = $tstart + 30 + $x = $x + 1 + endw + + $i = $i + 1 + $tstart = 946656000000 +endw + +sleep 100 + +$i1 = 1 +$i2 = 0 + +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +$dbPrefix = sliding_db +$tbPrefix = sliding_tb +$mtPrefix = sliding_mt + +$tb1 = $tbPrefix . $i1 +$tb2 = $tbPrefix . $i2 +$ts1 = $tb1 . .ts +$ts2 = $tb2 . .ts + +print ===============================interval_sliding query +sql select count(*) from sliding_tb0 interval(30s) sliding(30s); +if $row != 10 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data10 != @00-01-01 00:00:30.000@ then + return -1 +endi + +if $data11 != 1000 then + return -1 +endi + +sql select stddev(c1) from sliding_tb0 interval(10a) sliding(10a) +if $row != 10000 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 0.000000000 then + return -1 +endi + +if $data90 != @00-01-01 00:00:00.270@ then + return -1 +endi + +if $data91 != 0.000000000 then + return -1 +endi + +sql select stddev(c1),count(c2),first(c3),last(c4) from sliding_tb0 interval(10a) sliding(10a) order by ts desc; +if $row != 10000 then + return -1 +endi + +if $data00 != @00-01-01 00:04:59.970@ then + return -1 +endi + +if $data01 != 0.000000000 then + return -1 +endi + +if $data02 != 1 then + return -1 +endi + +if $data03 != 99 then + return -1 +endi + +if $data04 != 99 then + return -1 +endi + +if $data90 != @00-01-01 00:04:59.700@ then + return -1 +endi + +if $data91 != 0.000000000 then + return -1 +endi + +if $data92 != 1 then + return -1 +endi + +if $data93 != 90 then + return -1 +endi + +if $data94 != 90 then + return -1 +endi + +sql select count(c2),last(c4) from sliding_tb0 interval(30s) sliding(10s) order by ts asc; +if $row != 30 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data02 != 99 then + return -1 +endi + +sql select count(c2),stddev(c3),first(c4),last(c4) from sliding_tb0 where ts>'2000-01-01 0:0:0' and ts<'2000-1-1 0:0:31' interval(30s) sliding(30s) order by ts asc; +if $row != 2 then + return -1 +endi + +if $data04 != 99 then + return -1 +endi + +if $data01 != 999 then + return -1 +endi + +if $data02 != 28.837977152 then + return -1 +endi + +#interval offset + limit +sql select count(c2), first(c3),stddev(c4) from sliding_tb0 interval(10a) sliding(10a) order by ts desc limit 10 offset 990; +if $row != 10 then + return -1 +endi + +if $data00 != @00-01-01 00:04:30.270@ then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data02 != 9 then + return -1 +endi + +if $data03 != 0.000000000 then + return -1 +endi + +if $data90 != @00-01-01 00:04:30.000@ then + return -1 +endi + +if $data91 != 1 then + return -1 +endi + +if $data92 != 0 then + return -1 +endi + +if $data93 != 0.000000000 then + return -1 +endi + +#interval offset test +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(30s) order by ts asc limit 1000 offset 1; +if $row != 9 then + return -1 +endi + +if $data00 != @00-01-01 00:00:30.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data02 != 99 then + return -1 +endi + +if $data80 != @00-01-01 00:04:30.000@ then + return -1 +endi + +if $data81 != 1000 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 
where ts>'2000-1-1 0:0:0' and ts<'2000-1-1 0:0:31' interval(30s) sliding(30s) order by ts asc limit 1000 offset 0; +if $row != 2 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 999 then + return -1 +endi + +if $data02 != 99 then + return -1 +endi + +if $data03 != 28.837977152 then + return -1 +endi + +if $data10 != @00-01-01 00:00:30.000@ then + return -1 +endi + +if $data11 != 34 then + return -1 +endi + +if $data12 != 33 then + return -1 +endi + +if $data13 != 9.810708435 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 1; +if $row != 14 then + return -1 +endi + +if $data00 != @00-01-01 00:00:20.000@ then + return -1 +endi + +if $data01 != 1000 then + return -1 +endi + +if $data02 != 66 then + return -1 +endi + +if $data03 != 28.866070048 then + return -1 +endi + +if $data90 != @00-01-01 00:03:20.000@ then + return -1 +endi + +if $data91 != 1000 then + return -1 +endi + +if $data92 != 66 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 14; +if $row != 1 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) order by ts desc; +if $row != 10 then + return -1 +endi + +#00-01-01 00:04:30.000| 10| 0| 0.000000000| 0.000000000| +if $data00 != @00-01-01 00:04:30.000@ then + return -1 +endi + +if $data01 != 10 then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data03 != 0.000000000 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 15; +if $row != 0 then + return -1 +endi + +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts asc limit 1000; +if $row != 100 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.000@ then + return -1 +endi + +if $data02 != 9.521904571 then + return -1 +endi + +if $data05 != 33 then + return -1 +endi + +if $data10 != @00-01-01 00:00:00.010@ then + return -1 +endi + +if $data12 != 9.521904571 then + return -1 +endi + +if $data15 != 33 then + return -1 +endi + +if $data95 != 33 then + return -1 +endi + +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts desc limit 1000; +if $row != 100 then + return -1 +endi + +if $data00 != @00-01-01 00:00:00.990@ then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data02 != 0.000000000 then + return -1 +endi + +if $data03 != 1 then + return -1 +endi + +if $data90 != @00-01-01 00:00:00.900@ then + return -1 +endi + +if $data91 != 4 then + return -1 +endi + +if $data92 != 1.118033989 then + return -1 +endi + +if $data93 != 4 then + return -1 +endi + +if $data94 != 30.00000 then + return -1 +endi + +print check boundary check crash at client side +sql select count(*) from sliding_mt0 where ts>now and ts < now-1h; + +print ========================query on super table + +print ========================error case +sql_error select sum(c1) from sliding_tb0 interval(1a) sliding(1a); +sql_error select sum(c1) from sliding_tb0 interval(10a) sliding(12a); +sql_error select sum(c1) from sliding_tb0 sliding(1n) 
interval(1y); +sql_error select sum(c1) from sliding_tb0 interval(-1y) sliding(1n); +sql_error select sum(c1) from sliding_tb0 interval(1y) sliding(-1n); +sql_error select sum(c1) from sliding_tb0 interval(0) sliding(0); +sql_error select sum(c1) from sliding_tb0 interval(0m) sliding(0m); +sql_error select sum(c1) from sliding_tb0 interval(m) sliding(m); +sql_error select sum(c1) from sliding_tb0 sliding(4m); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/slimit_alter_tags.sim b/tests/script/general/parser/slimit_alter_tags.sim index e8e81e8809..fb48dddba7 100644 --- a/tests/script/general/parser/slimit_alter_tags.sim +++ b/tests/script/general/parser/slimit_alter_tags.sim @@ -141,9 +141,9 @@ $res = 3 * $rowNum if $data00 != $res then return -1 endi -if $data01 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data01 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data02 != 3 then return -1 endi @@ -154,9 +154,9 @@ $res = 3 * $rowNum if $data10 != $res then return -1 endi -if $data11 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data11 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data15 != 2 then return -1 endi @@ -223,9 +223,9 @@ $res = 3 * $rowNum if $data00 != $res then return -1 endi -if $data01 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data01 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data02 != 3 then return -1 endi @@ -236,9 +236,9 @@ $res = 3 * $rowNum if $data10 != $res then return -1 endi -if $data11 != @18-09-17 09:00:00.000@ then - return -1 -endi +#if $data11 != @18-09-17 09:00:00.000@ then +# return -1 +#endi if $data15 != 2 then return -1 endi diff --git a/tests/script/general/parser/stream.sim b/tests/script/general/parser/stream.sim deleted file mode 100644 index 2f233b6189..0000000000 --- a/tests/script/general/parser/stream.sim +++ /dev/null @@ -1,212 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 5 -system sh/exec.sh -n dnode1 -s start -sleep 3000 -sql connect -print ======================== stream.sim -sleep 2000 -$db = strm_db -$tb = tb -$mt = mt -$strm = strm -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== set up DB -$i = 0 - -sql drop database if exists $db -sql create database $db -sql use $db - - -## [TBASE300] -print ====== TBASE-300 -sql create table mt (ts timestamp, c1 int, c2 int) tags(t1 int) -sql create table tb1 using mt tags(1) -sql create table tb2 using mt tags(2) -sql create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s) -sleep 10000 -sql insert into tb2 values(now,1,1) -sql insert into tb1 values(now,1,1) -sleep 4000 -sql select * from mt -sql select * from strm -sql drop table tb1 -sleep 100000 -sql select * from strm -if $rows != 2 then - if $rows != 1 then - return -1 - endi -endi -sql drop table tb2 -sql drop table mt -sql drop table strm - -## [TBASE304] -print ====== TBASE-304 -sleep 10000 -# we cannot reset query cache in server side, as a workaround, -# set super table name to mt304, need to change back to mt later -print create mt304 -sql create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int) -print create tb1 -sql create table tb1 using mt304 tags(1, 1) -print create tb2 -sql create table tb2 using mt304 tags(1, -1) -print create strm -sql create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 
interval(4s) sliding(2s) -sql insert into tb1 values (now,1) -sql insert into tb2 values (now,2) -sleep 100000 -sql select * from strm; -if $rows != 2 then - print ==== expect rows = 2, actually returned rows = $rows - return -1 -endi -if $data01 != 1 then - return -1 -endi -print data02 = $data02 -if $data02 != 1.000000000 then - return -1 -endi -sql alter table mt304 drop tag t2; -sql insert into tb2 values (now,2); -sql insert into tb1 values (now,1); -sql select * from strm; -sql alter table mt304 add tag t2 int; -sleep 10000 -sql select * from strm - -print ================= create a stream with a wildcard filter on tags of a STable -sql drop database $db -sql create database $db -sql use $db -sql create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10)) -sql create table tb1 using stb tags('a1') -sql create table tb2 using stb tags('b2') -sql create table tb3 using stb tags('a3') -sql create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s) -sleep 11000 -sql insert into tb1 values (now, 0, 'tb1') -sleep 4000 -sql insert into tb2 values (now, 2, 'tb2') -sleep 4000 -sql insert into tb3 values (now, 0, 'tb3') -sleep 60000 - -sql describe strm -if $rows == 0 then - return -1 -endi - -sql select * from strm -sleep 1000 -print ======== data0: $data00 $data01 $data02 $data03 -print ======== data1: $data10 $data11 $data12 $data13 -print ======== data2: $data20 $data21 $data22 $data23 -print ======== data3: $data30 $data31 $data32 $data33 -if $rows != 4 then - print ==== expect rows = 4, actually returned rows = $rows - return -1 -endi -if $data02 != 0.000000000 then - return -1 -endi -if $data03 == tb2 then - return -1 -endi -if $data13 == tb2 then - return -1 -endi -if $data23 == tb2 then - return -1 -endi -if $data33 == tb2 then - return -1 -endi - -## The vnode client needs to refresh metadata cache to allow strm calculate tb4's data. 
But the current refreshing frequency is every 10 min -## commented out the case below to save running time -sql create table tb4 using stb tags('a4') -sql insert into tb4 values(now, 4, 'tb4') -sleep 60000 -sql select * from strm order by ts desc -print ======== data0: $data00 $data01 $data02 $data03 -#print ======== data1: $data10 $data11 $data12 $data13 -#print ======== data2: $data20 $data21 $data22 $data23 -#print ======== data3: $data30 $data31 $data32 $data33 -if $rows != 6 then - print ==== expect rows = 6, actually returned rows = $rows - return -1 -endi -if $data02 != 4.000000000 then - return -1 -endi -if $data03 != tb4 then - return -1 -endi - -print =============== change tag values to see if stream still works correctly -sql alter table tb4 set tag t1='b4' -sleep 3000 # waiting for new tag valid -sql insert into tb1 values (now, 1, 'tb1_a1') -sleep 4000 -sql insert into tb4 values (now, -4, 'tb4_b4') -sleep 100000 -sql select * from strm order by ts desc -sleep 1000 -print ======== data0: $data00 $data01 $data02 $data03 -#print ======== data1: $data10 $data11 $data12 $data13 -#print ======== data2: $data20 $data21 $data22 $data23 -#print ======== data3: $data30 $data31 $data32 $data33 -if $rows != 8 then - print ==== expect rows = 8, actually returned rows = $rows - return -1 -endi -if $data02 != 1.000000000 then - return -1 -endi -if $data03 != tb1_a1 then - return -1 -endi - -sql drop database if exists $db -sql drop database if exists strm_db_0 -sql create database strm_db_0 -sql use strm_db_0 - -sql create table stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15)) -sql create table tb0 using stb tags(0, 'tb0') -sql create table tb1 using stb tags(1, 'tb1') -sql create table tb2 using stb tags(2, 'tb2') -sql create table tb3 using stb tags(3, 'tb3') -sql create table tb4 using stb tags(4, 'tb4') - -sql create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb where ts < now + 30s interval(4s) sliding(2s) -sleep 1000 -sql insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) -sleep 30000 -sql select * from strm0 order by ts desc -sleep 1000 -if $rows != 2 then - print ==== expect rows = 2, actually returned rows = $rows - return -1 -endi - -sql insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', '涛思4', true) -sleep 30000 -sql select * from strm0 order by ts desc -sleep 10000 -if $rows == 4 then - print ==== actually returned rows = $rows, expect always not equal to 4 - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/tags_dynamically_specifiy.sim b/tests/script/general/parser/tags_dynamically_specifiy.sim index 6e1766a91d..0a5d5c9716 100644 --- a/tests/script/general/parser/tags_dynamically_specifiy.sim +++ b/tests/script/general/parser/tags_dynamically_specifiy.sim @@ -28,16 +28,27 @@ sql insert into tb3 using stb (t3) tags (3.3) values ( now + 3s, 'binary3', 3 sql insert into tb4 (ts, c1, c2) using stb (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4', 
4) sql insert into tb5 (ts, c1, c3) using stb (t1, t3) tags ('tag5', 11.11) values ( now + 5s, 'binary5', 5.5) - +sql insert into tb6 (ts, c1, c3) using stb tags ('tag5', 6, 11.11) values ( now + 5s, 'binary6', 6.6) +sql insert into tb7 (ts, c1, c2, c3) using stb tags ('tag5', 7, 11.11) values ( now + 5s, 'binary7', 7, 7.7) sql select * from stb order by ts asc -if $rows != 5 then +if $rows != 7 then return -1 endi +sql_error insert into tb11 using stb (t1) tags () values ( now + 1s, 'binary1', 1, 1.1) +sql_error insert into tb12 using stb (t1, t3) tags () values ( now + 1s, 'binary1', 1, 1.1) +sql_error insert into tb13 using stb (t1, t2, t3) tags (8, 9.13, 'ac') values ( now + 1s, 'binary1', 1, 1.1) +sql_error insert into tb14 using stb () tags (2) values ( now + 2s, 'binary2', 2, 2.2) +sql_error insert into tb15 using stb (t2, t3) tags (3.3) values ( now + 3s, 'binary3', 3, 3.3) +sql_error insert into tb16 (ts, c1, c2) using stb (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4') +sql_error insert into tb17 (ts, c1, c3) using stb (t1, t3) tags ('tag5', 11.11, 5) values ( now + 5s, 'binary5', 5.5) +sql_error insert into tb18 (ts, c1, c3) using stb tags ('tag5', 16) values ( now + 5s, 'binary6', 6.6) +sql_error insert into tb19 (ts, c1, c2, c3) using stb tags (19, 'tag5', 91.11) values ( now + 5s, 'binary7', 7, 7.7) + sql create table stbx (ts timestamp, c1 binary(10), c2 int, c3 float) tags (t1 binary(10), t2 int, t3 float) -sql insert into tb100 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag100', 100, 100.9) values ( now + 10s, 'binary100', 100, 100.9) tb101 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag101', 101, 101.9) values ( now + 10s, 'binary101', 101, 101.9) tb102 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag102', 102, 102.9) values ( now + 10s, 'binary102', 102, 102.9) +sql insert into tb100 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag100', 100, 100.123456) values ( now + 10s, 'binary100', 100, 100.9) tb101 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag101', 101, 101.9) values ( now + 10s, 'binary101', 101, 101.9) tb102 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag102', 102, 102.9) values ( now + 10s, 'binary102', 102, 102.9) sql select * from stbx if $rows != 3 then @@ -52,9 +63,38 @@ if $data05 != 100 then return -1 endi -#if $data06 != 100.90000 then -# print "expect: 100.90000, act: $data06" -# return -1 -#endi +if $data06 != 100.12346 then + print "expect: 100.12346, act: $data06" + return -1 +endi + +sql create table stby (ts timestamp, c1 binary(10), c2 int, c3 float) tags (t1 binary(10), t2 int, t3 float) +sql reset query cache +sql insert into tby1 using stby (t1) tags ('tag1') values ( now + 1s, 'binary1', 1, 1.1) +sql insert into tby2 using stby (t2) tags (2) values ( now + 2s, 'binary2', 2, 2.2) +sql insert into tby3 using stby (t3) tags (3.3) values ( now + 3s, 'binary3', 3, 3.3) +sql insert into tby4 (ts, c1, c2) using stby (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4', 4) +sql insert into tby5 (ts, c1, c3) using stby (t1, t3) tags ('tag5', 11.11) values ( now + 5s, 'binary5', 5.5) +sql insert into tby6 (ts, c1, c3) using stby tags ('tag5', 6, 11.11) values ( now + 5s, 'binary6', 6.6) +sql insert into tby7 (ts, c1, c2, c3) using stby tags ('tag5', 7, 11.11) values ( now + 5s, 'binary7', 7, 7.7) +sql select * from stby order by ts asc +if $rows != 7 then + return -1 +endi + +sql reset query cache +sql insert into tby1 using stby (t1) tags ('tag1') values ( now + 1s, 'binary1y', 1, 1.1) +sql insert into tby2 using stby 
(t2) tags (2) values ( now + 2s, 'binary2y', 2, 2.2) +sql insert into tby3 using stby (t3) tags (3.3) values ( now + 3s, 'binary3y', 3, 3.3) +sql insert into tby4 (ts, c1, c2) using stby (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4y', 4) +sql insert into tby5 (ts, c1, c3) using stby (t1, t3) tags ('tag5', 11.11) values ( now + 5s, 'binary5y', 5.5) +sql insert into tby6 (ts, c1, c3) using stby tags ('tag5', 6, 11.11) values ( now + 5s, 'binary6y', 6.6) +sql insert into tby7 (ts, c1, c2, c3) using stby tags ('tag5', 7, 11.11) values ( now + 5s, 'binary7y', 7, 7.7) + +sql select * from stby order by ts asc +if $rows != 14 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/tags_filter.sim b/tests/script/general/parser/tags_filter.sim index 42e8c8f9f9..9842b4fda6 100644 --- a/tests/script/general/parser/tags_filter.sim +++ b/tests/script/general/parser/tags_filter.sim @@ -23,7 +23,7 @@ sql create table stb (ts timestamp, c1 int) tags (t1 binary(10)) sql create table tb1 using stb tags('*') sql create table tb2 using stb tags('%') sql create table tb3 using stb tags('') -sql create table tb4 using stb tags('/'') +sql create table tb4 using stb tags('\'') sql insert into tb1 values ( $ts0 , 1) sql insert into tb2 values ( $ts0 , 2) @@ -54,7 +54,7 @@ if $data01 != 3 then return -1 endi -sql select * from stb where t1 = '/'' +sql select * from stb where t1 = '\'' if $rows != 1 then return -1 endi @@ -70,9 +70,8 @@ if $data01 != 1 then return -1 endi -sql_error select * from stb where t1 > 1 -sql_error select * from stb where t1 > '1' -sql_error select * from stb where t1 > 'a' +sql select * from stb where t1 > '1' +sql select * from stb where t1 > 'a' ## wildcard '%' #sql select * from stb where t1 like '%' @@ -91,7 +90,7 @@ if $data01 != 3 then return -1 endi -sql select * from stb where t1 like '/'' +sql select * from stb where t1 like '\'' if $rows != 1 then return -1 endi @@ -104,3 +103,50 @@ sql show databases if $rows != 0 then return -1 endi + +print ============tbase-1328 + +sql drop database if exists testselectwheretags; +sql CREATE DATABASE IF NOT EXISTS testselectwheretags; +sql USE testselectwheretags; +sql CREATE TABLE IF NOT EXISTS st1 (ts TIMESTAMP, v1 INT, v2 FLOAT, v3 BOOL) TAGS (farm NCHAR(2), period NCHAR(2), line NCHAR(2), unit INT); +sql CREATE TABLE IF NOT EXISTS a01 USING st1 TAGS ('2', 'c', '2', 2); +sql CREATE TABLE IF NOT EXISTS a02 USING st1 TAGS ('1', 'c', 'a', 1); +sql CREATE TABLE IF NOT EXISTS a03 USING st1 TAGS ('1', 'c', '02', 1); +sql INSERT INTO a01 VALUES (1574872693209, 3, 3.000000, 1); +sql INSERT INTO a02 VALUES (1574872683933, 2, 2.000000, 1); +sql INSERT INTO a03 VALUES (1574872683933, 2, 2.000000, 1); + +sql select * from st1 where line='02'; +if $rows != 1 then + return -1 +endi + +sql CREATE TABLE IF NOT EXISTS st2 (ts TIMESTAMP, v1 INT, v2 FLOAT) TAGS (farm BINARY(2), period BINARY(2), line BINARY(2)); + +sql CREATE TABLE IF NOT EXISTS b01 USING st2 TAGS ('01', '01', '01'); +sql CREATE TABLE IF NOT EXISTS b02 USING st2 TAGS ('01', '01', '01'); +sql CREATE TABLE IF NOT EXISTS b03 USING st2 TAGS ('01', '02', '01'); +sql CREATE TABLE IF NOT EXISTS b04 USING st2 TAGS ('01', '01', '02'); + +sql INSERT INTO b03 VALUES (1576043322749, 3, 3.000000); +sql INSERT INTO b03 VALUES (1576043323596, 3, 3.000000); + +sql INSERT INTO b02 VALUES (1576043315169, 2, 2.000000); +sql INSERT INTO b02 VALUES (1576043316295, 2, 2.000000); +sql INSERT INTO b02 VALUES (1576043317167, 
2, 2.000000); + +sql INSERT INTO b01 VALUES (1576043305972, 1, 1.000000); +sql INSERT INTO b01 VALUES (1576043308513, 1, 1.000000); + +sql select * from st2 where period='02'; +if $rows != 2 then + return -1 +endi + +sql select sum(v2) from st2 group by farm,period,line; +if $rows != 2 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 31acaad143..26a6a39815 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -1,3 +1,4 @@ +sleep 2000 run general/parser/alter.sim sleep 2000 run general/parser/alter1.sim @@ -7,7 +8,6 @@ sleep 2000 run general/parser/auto_create_tb.sim sleep 2000 run general/parser/auto_create_tb_drop_tb.sim - sleep 2000 run general/parser/col_arithmetic_operation.sim sleep 2000 @@ -23,77 +23,81 @@ run general/parser/create_tb.sim sleep 2000 run general/parser/dbtbnameValidate.sim sleep 2000 +run general/parser/fill.sim +sleep 2000 +run general/parser/fill_stb.sim +sleep 2000 +#run general/parser/fill_us.sim # +sleep 2000 +run general/parser/first_last.sim +sleep 2000 run general/parser/import_commit1.sim sleep 2000 run general/parser/import_commit2.sim sleep 2000 run general/parser/import_commit3.sim sleep 2000 -run general/parser/insert_tb.sim -sleep 2000 -run general/parser/first_last.sim -sleep 2000 #run general/parser/import_file.sim sleep 2000 -run general/parser/lastrow.sim -sleep 2000 -run general/parser/nchar.sim -sleep 2000 -#run general/parser/null_char.sim -sleep 2000 -run general/parser/single_row_in_tb.sim -sleep 2000 -run general/parser/select_from_cache_disk.sim -sleep 2000 -run general/parser/selectResNum.sim -sleep 2000 -run general/parser/mixed_blocks.sim -sleep 2000 -run general/parser/limit1.sim -sleep 2000 -run general/parser/limit.sim -sleep 2000 -run general/parser/limit1_tblocks100.sim -sleep 2000 -run general/parser/select_across_vnodes.sim -sleep 2000 -run general/parser/slimit1.sim -sleep 2000 -run general/parser/tbnameIn.sim -sleep 2000 -run general/parser/projection_limit_offset.sim -sleep 2000 -run general/parser/limit2.sim -sleep 2000 -run general/parser/fill.sim -sleep 2000 -run general/parser/fill_stb.sim -sleep 2000 -run general/parser/where.sim -sleep 2000 -run general/parser/slimit.sim -sleep 2000 -run general/parser/select_with_tags.sim -sleep 2000 -run general/parser/interp.sim +run general/parser/insert_tb.sim sleep 2000 run general/parser/tags_dynamically_specifiy.sim sleep 2000 -run general/parser/groupby.sim +run general/parser/interp.sim +sleep 2000 +run general/parser/lastrow.sim +sleep 2000 +run general/parser/limit.sim +sleep 2000 +run general/parser/limit1.sim +sleep 2000 +run general/parser/limit1_tblocks100.sim +sleep 2000 +run general/parser/limit2.sim +sleep 2000 +run general/parser/mixed_blocks.sim +sleep 2000 +run general/parser/nchar.sim +sleep 2000 +run general/parser/null_char.sim +sleep 2000 +run general/parser/selectResNum.sim +sleep 2000 +run general/parser/select_across_vnodes.sim +sleep 2000 +run general/parser/select_from_cache_disk.sim sleep 2000 run general/parser/set_tag_vals.sim sleep 2000 +run general/parser/single_row_in_tb.sim +sleep 2000 +run general/parser/slimit.sim +sleep 2000 +run general/parser/slimit1.sim +sleep 2000 +run general/parser/slimit_alter_tags.sim +sleep 2000 +run general/parser/tbnameIn.sim +sleep 2000 run general/parser/slimit_alter_tags.sim # persistent failed sleep 2000 run 
general/parser/join.sim sleep 2000 run general/parser/join_multivnode.sim sleep 2000 -run general/parser/repeatAlter.sim +run general/parser/projection_limit_offset.sim sleep 2000 -run general/parser/binary_escapeCharacter.sim +run general/parser/select_with_tags.sim sleep 2000 -run general/parser/bug.sim +run general/parser/groupby.sim +sleep 2000 +run general/parser/union.sim +sleep 2000 +run general/parser/sliding.sim +sleep 2000 +run general/parser/fill_us.sim +sleep 2000 +run general/parser/tags_filter.sim #sleep 2000 #run general/parser/repeatStream.sim diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim new file mode 100644 index 0000000000..14b6c97b7c --- /dev/null +++ b/tests/script/general/parser/union.sim @@ -0,0 +1,411 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c debugFlag -v 135 +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/exec.sh -n dnode1 -s start +sleep 1000 +sql connect + +$dbPrefix = union_db +$tbPrefix = union_tb +$tbPrefix1 = union_tb_ +$mtPrefix = union_mt +$tbNum = 10 +$rowNum = 10000 +$totalNum = $tbNum * $rowNum + +print =============== union.sim +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +$j = 1 + +$mt1 = $mtPrefix . $j + +sql drop database if exists $db -x step1 +step1: +sql create database if not exists $db maxtables 4 +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$i = 0 +$t = 1578203484000 + +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$j = 0 +$t = 1578203484000 +$rowNum = 1000 +$tbNum = 5 +$i = 0 + +while $i < $tbNum + $tb1 = $tbPrefix1 . $j + sql create table $tb1 using $mt1 tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 + $j = $j + 1 +endw + +print sleep 1sec. +sleep 1000 + +$i = 1 +$tb = $tbPrefix . 
$i + +## column type not identical +sql_error select count(*) as a from union_mt0 union all select avg(c1) as a from union_mt0 +sql_error select count(*) as a from union_mt0 union all select spread(c1) as a from union_mt0; + +## union not supported +sql_error (select count(*) from union_mt0) union (select count(*) from union_mt0); + +## column type not identical +sql_error select c1 from union_mt0 limit 10 union all select c2 from union_tb1 limit 20; + +## union not support recursively union +sql_error select c1 from union_tb0 limit 2 union all (select c1 from union_tb1 limit 1 union all select c1 from union_tb3 limit 2); +sql_error (select c1 from union_tb0 limit 1 union all select c1 from union_tb1 limit 1) union all (select c1 from union_tb0 limit 10 union all select c1 from union_tb1 limit 10); + +# union as subclause +sql_error (select c1 from union_tb0 limit 1 union all select c1 from union_tb1 limit 1) limit 1 + +# sql with parenthese +sql (((select c1 from union_tb0))) +if $rows != 10000 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +# mixed order +sql select ts, c1 from union_tb1 order by ts asc limit 10 union all select ts, c1 from union_tb0 order by ts desc limit 2 union all select ts, c1 from union_tb2 order by ts asc limit 10 +if $rows != 22 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:52:24.000@ then + return -1 +endi + +if $data11 != 1 then + return -1 +endi + +if $data90 != @20-01-05 14:00:24.000@ then + return -1 +endi + +if $data91 != 9 then + return -1 +endi + +# different sort order + +# super table & normal table mixed up +sql select c3 from union_tb0 limit 2 union all select sum(c1) as c3 from union_mt0; +if $rows != 3 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data20 != 4950000 then + return -1 +endi + +# type compatible +sql select c3 from union_tb0 limit 2 union all select sum(c1) as c3 from union_tb1; +if $rows != 3 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data20 != 495000 then + return -1 +endi + +# two join subclause +sql select count(*) as c from union_tb0, union_tb1 where union_tb0.ts=union_tb1.ts union all select union_tb0.c3 as c from union_tb0, union_tb1 where union_tb0.ts=union_tb1.ts limit 10 +if $rows != 11 then + return -1 +endi + +if $data00 != 10000 then + return -1 +endi + +if $data10 != 0 then + return -1 +endi + +if $data20 != 1 then + return -1 +endi + +if $data90 != 8 then + return -1 +endi + +print ===========================================tags union +# two super table tag union, limit is not active during retrieve tags query +sql select t1 from union_mt0 union all select t1 from union_mt0 limit 1 +if $rows != 20 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data90 != 9 then + return -1 +endi + +#========================================== two super table join subclause +print ================two super table join subclause +sql select avg(union_mt0.c1) as c from union_mt0 interval(1h) limit 10 union all select union_mt1.ts, union_mt1.c1/1.0 as c from union_mt0, union_mt1 where union_mt1.ts=union_mt0.ts and union_mt1.t1=union_mt0.t1 limit 5; +print the rows value is: $rows + +if $rows != 15 then + return -1 +endi + +# first subclause are empty +sql select count(*) as c from union_tb0 where ts>now+10y union 
all select sum(c1) as c from union_tb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 495000 then + return -1 +endi + +# all subclause are empty +sql select c1 from union_tb0 limit 0 union all select c1 from union_tb1 where ts>'2021-1-1 0:0:0' +if $rows != 0 then + return -1 +endi + +# middle subclause empty +sql select c1 from union_tb0 limit 1 union all select c1 from union_tb1 where ts>'2030-1-1 0:0:0' union all select last(c1) as c1 from union_tb1; +if $rows != 2 then + return -1 +endi + +if $data00 != 0 then + return -1 +endi + +if $data10 != 99 then + return -1 +endi + +# multi-vnode projection query +sql (select c1 from union_mt0) union all select c1 from union_mt0; +if $rows != 200000 then + return -1 +endi + +# multi-vnode projection query + limit +sql (select ts, c1 from union_mt0 limit 1) union all (select ts, c1 from union_mt0 limit 1); +if $rows != 2 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data11 != 0 then + return -1 +endi + +# two aggregated functions for super tables +sql select sum(c1) as a from union_mt0 interval(1s) limit 9 union all select ts, max(c3) as a from union_mt0 limit 2; +if $rows != 10 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:52:24.000@ then + return -1 +endi + +if $data11 != 10 then + return -1 +endi + +if $data20 != @20-01-05 13:53:24.000@ then + return -1 +endi + +if $data21 != 20 then + return -1 +endi + +if $data90 != @20-01-05 15:30:24.000@ then + return -1 +endi + +if $data91 != 99 then + return -1 +endi + +#1111111111111111111111111111111111111111111111111 +# two aggregated functions for normal tables +sql select sum(c1) as a from union_tb0 limit 1 union all select sum(c3) as a from union_tb1 limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != 495000 then + return -1 +endi + +if $data10 != 495000 then + return -1 +endi + +# two super table query + interval + limit +sql select ts, first(c3) as a from union_mt0 limit 1 union all select sum(c3) as a from union_mt0 interval(1h) limit 1; +if $rows != 2 then + return -1 +endi + +if $data00 != @20-01-05 13:51:24.000@ then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != @20-01-05 13:00:00.000@ then + return -1 +endi + +if $data11 != 360 then + return -1 +endi + +sql select server_status() union all select server_status() +if $rows != 2 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +sql select client_version() union all select server_version() +if $rows != 2 then + return -1 +endi + +sql select database() union all select database() +if $rows != 2 then + return -1 +endi + +if $data00 != @union_db0@ then + return -1 +endi + +if $data10 != @union_db0@ then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 4d86b50f38..710156a4ff 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -277,4 +277,44 @@ if $rows != 2 then return -1 endi +print ==========tbase-1363 +#sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$i = 0 +while $i < 1 + $tb = 
test_null_filter + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < 10000 + $ms = $x . m + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + sql insert into $tb values (now + $ms , null , null , null , null , null , null , null , null , null ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 2000 + +system sh/exec.sh -n dnode1 -s start + +sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter'); +if $row != 0 then + return -1 +endi + +sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter'); +if $row != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/stream/metrics_1.sim b/tests/script/general/stream/metrics_1.sim deleted file mode 100644 index 94498cb925..0000000000 --- a/tests/script/general/stream/metrics_1.sim +++ /dev/null @@ -1,287 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = m1_db -$tbPrefix = m1_tb -$mtPrefix = m1_mt -$stPrefix = m1_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 0 -$mt = $mtPrefix . $i - -sql select count(*) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -print create table $st as select avg(tbcol) from $mt interval(1d) -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $mt interval(1d) -x step11 - return -1 -step11: - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $mt interval(1d) -x step12 - return -1 -step12: - -print =============== step13 -sql select top(tbcol, 1) from $mt interval(1d) - -print =============== step14 - -sql select bottom(tbcol, 1) from $mt interval(1d) - -print =============== step15 pe - -sql select percentile(tbcol, 1) from $mt interval(1d) -x step15 - return -1 -step15: - -print =============== step16 -sql select diff(tbcol) from $mt interval(1d) -x step16 - return -1 -step16: - -print =============== step17 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step18 as -sql select count(tbcol) from $mt interval(1d) -print ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step19 gb -sql select count(tbcol) from $mt interval(1d) group by tgcol -print ===> $data00 $data01 -if $data01 != 20 then - return -1 -endi - -print =============== step20 x -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) group by tgcol -print ===> $data00 $data01 -if $data01 != 20 then - return -1 -endi - -print =============== step21 -print sleep 120 seconds -sleep 120000 - -print =============== step22 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -#$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/general/stream/new_stream.sim b/tests/script/general/stream/new_stream.sim deleted file mode 100644 index 001602079b..0000000000 --- a/tests/script/general/stream/new_stream.sim +++ /dev/null @@ -1,106 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode1 -c tableMetaKeepTimer -v 10 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect -print ======================== dnode1 start - -$dbPrefix = ns_db -$tbPrefix = ns_tb -$mtPrefix = ns_mt -$stPrefix = ns_st -$tbNum = 5 -$rowNum = 200 -$totalNum = 200 - -print =============== step1 - -$i = 0 -$db = $dbPrefix -$mt = $mtPrefix -$st = $stPrefix - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(10s) -print $data00 $data01 $data02 $data03 - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(10s) - -print =============== step3 -print sleep 120 seconds -sleep 120000 - -print =============== step4 - -sql select * from $st -print $st ==> $rows1 $data00 $data01 $data02 $data03 -if $data03 >= 51 then - return -1 -endi - -print =============== step5 - -$tbNum = 10 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -print =============== step6 -print sleep 120 seconds -sleep 120000 - -print =============== step7 - -sql select * from $st order by ts desc -print $st ==> $rows1 $data00 $data01 $data02 $data03 -if $data03 <= 51 then - return -1 -endi - - - - - diff --git a/tests/script/general/stream/stream_1.sim b/tests/script/general/stream/stream_1.sim deleted file mode 100644 index 958c877ee5..0000000000 --- a/tests/script/general/stream/stream_1.sim +++ /dev/null @@ -1,206 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = s1_db -$tbPrefix = s1_tb -$mtPrefix = s1_mt -$stPrefix = s1_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 10 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step3 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi -if $data02 != 20 then - return -1 -endi -if $data03 != 20 then - return -1 -endi - -print =============== step4 -sql drop table $st -sql show tables -if $rows != 10 then - return -1 -endi - -print =============== step5 -sql select * from $st -x step4 - return -1 -step4: - -print =============== step6 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step7 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi -if $data02 != 20 then - return -1 -endi -if $data03 != 20 then - return -1 -endi - -print =============== step8 -$i = 1 -$st = $stPrefix . $i - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -sql show tables -if $rows != 11 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step9 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -print =============== step10 -sql drop table $st -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step11 -sql select * from $st -x step10 - return -1 -step10: - -print =============== step12 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step13 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - diff --git a/tests/script/general/stream/stream_2.sim b/tests/script/general/stream/stream_2.sim deleted file mode 100644 index 057529b427..0000000000 --- a/tests/script/general/stream/stream_2.sim +++ /dev/null @@ -1,194 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 3000 -sql connect - -print ======================== dnode1 start - -$dbPrefix = 
s2_db -$tbPrefix = s2_tb -$mtPrefix = s2_mt -$stPrefix = s2_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i - -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 10 then - return -1 -endi - -sql create table $st as select count(tbcol) from $tb interval(1d) - -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step3 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi - -print =============== step4 -sql drop table $st -sql show tables -if $rows != 10 then - return -1 -endi - -print =============== step5 -sql select * from $st -x step4 - return -1 -step4: - -print =============== step6 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step7 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 -if $data01 != 20 then - return -1 -endi -if $data02 != 20 then - return -1 -endi -if $data03 != 20 then - return -1 -endi - -print =============== step8 -$i = 1 -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -sql show tables -if $rows != 11 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step9 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != 200 then - return -1 -endi -if $data03 != 200 then - return -1 -endi - -print =============== step10 -sql drop table $st -sql show tables -if $rows != 11 then - return -1 -endi - -print =============== step11 -sql select * from $st -x step10 - return -1 -step10: - -print =============== step12 -sql create table $st as select count(tbcol) from $mt interval(1d) - -sql show tables -if $rows != 12 then - return -1 -endi - -print =============== step13 -print sleep 120 seconds -sleep 120000 -sql select * from $st -print select * from $st => $data01 $data02, $data03 -if $data01 != 200 then - return -1 -endi -if $data02 != null then - return -1 -endi -if $data03 != null then - return -1 -endi - diff --git a/tests/script/general/stream/testSuite.sim b/tests/script/general/stream/testSuite.sim index 046348b123..44f886d02b 100644 --- a/tests/script/general/stream/testSuite.sim +++ b/tests/script/general/stream/testSuite.sim @@ -1,9 +1,6 @@ -run general/stream/stream_1.sim -run general/stream/stream_2.sim run general/stream/stream_3.sim run general/stream/stream_restart.sim run general/stream/table_1.sim -run general/stream/metrics_1.sim run general/stream/table_n.sim run general/stream/metrics_n.sim run general/stream/table_del.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 3c4733a25b..bead4bd095 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -1,10 +1,10 @@ cd ../../../debug; cmake .. 
cd ../../../debug; make
-#./test.sh -f general/alter/cached_schema_after_alter.sim
+./test.sh -f general/alter/cached_schema_after_alter.sim
./test.sh -f general/alter/count.sim
./test.sh -f general/alter/import.sim
-#./test.sh -f general/alter/insert1.sim
+./test.sh -f general/alter/insert1.sim
./test.sh -f general/alter/insert2.sim
./test.sh -f general/alter/metrics.sim
./test.sh -f general/alter/table.sim
@@ -117,7 +117,6 @@ cd ../../../debug; make
./test.sh -f general/parser/import_commit3.sim
./test.sh -f general/parser/insert_tb.sim
./test.sh -f general/parser/first_last.sim
-#./test.sh -f general/parser/import_file.sim
./test.sh -f general/parser/lastrow.sim
./test.sh -f general/parser/nchar.sim
./test.sh -f general/parser/null_char.sim
@@ -135,6 +134,7 @@ cd ../../../debug; make
./test.sh -f general/parser/limit2.sim
./test.sh -f general/parser/fill.sim
./test.sh -f general/parser/fill_stb.sim
+#./test.sh -f general/parser/fill_us.sim
./test.sh -f general/parser/where.sim
./test.sh -f general/parser/slimit.sim
./test.sh -f general/parser/select_with_tags.sim
@@ -142,15 +142,14 @@ cd ../../../debug; make
./test.sh -f general/parser/tags_dynamically_specifiy.sim
./test.sh -f general/parser/groupby.sim
./test.sh -f general/parser/set_tag_vals.sim
-#./test.sh -f general/parser/slimit_alter_tags.sim
+#./test.sh -f general/parser/sliding.sim
+./test.sh -f general/parser/tags_filter.sim
+./test.sh -f general/parser/slimit_alter_tags.sim
./test.sh -f general/parser/join.sim
./test.sh -f general/parser/join_multivnode.sim
./test.sh -f general/parser/binary_escapeCharacter.sim
./test.sh -f general/parser/bug.sim
-#./test.sh -f general/parser/stream_on_sys.sim
-./test.sh -f general/parser/stream.sim
./test.sh -f general/parser/repeatAlter.sim
-#./test.sh -f general/parser/repeatStream.sim
./test.sh -f general/stable/disk.sim
./test.sh -f general/stable/dnode3.sim
@@ -201,7 +200,7 @@ cd ../../../debug; make
./test.sh -f general/tag/bool.sim
./test.sh -f general/tag/change.sim
./test.sh -f general/tag/column.sim
-#./test.sh -f general/tag/commit.sim
+./test.sh -f general/tag/commit.sim
./test.sh -f general/tag/create.sim
./test.sh -f general/tag/delete.sim
./test.sh -f general/tag/double.sim
@@ -309,14 +308,14 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
-./test.sh -f general/stream/metrics_1.sim
+# stream still has bugs
+#./test.sh -f general/parser/stream_on_sys.sim
+#./test.sh -f general/parser/repeatStream.sim
+
./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
-#./test.sh -f general/stream/new_stream.sim
./test.sh -f general/stream/restart_stream.sim
-./test.sh -f general/stream/stream_1.sim
-./test.sh -f general/stream/stream_2.sim
./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim
./test.sh -f general/stream/table_1.sim
@@ -334,6 +333,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
+# lower the priority of these cases while the file-corruption issue is unresolved
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
diff --git a/tests/script/jenkins/simple.txt b/tests/script/jenkins/simple.txt
index 135326af9e..2fe364bf26 
100644
--- a/tests/script/jenkins/simple.txt
+++ b/tests/script/jenkins/simple.txt
@@ -148,7 +148,6 @@ cd ../../../debug; make
./test.sh -f general/parser/binary_escapeCharacter.sim
./test.sh -f general/parser/bug.sim
#./test.sh -f general/parser/stream_on_sys.sim
-./test.sh -f general/parser/stream.sim
./test.sh -f general/parser/repeatAlter.sim
#./test.sh -f general/parser/repeatStream.sim
diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh
index eb0a9b526d..37be89f8d6 100755
--- a/tests/script/sh/deploy.sh
+++ b/tests/script/sh/deploy.sh
@@ -110,6 +110,7 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG
echo "serverPort ${NODE}" >> $TAOS_CFG
echo "dataDir $DATA_DIR" >> $TAOS_CFG
echo "logDir $LOG_DIR" >> $TAOS_CFG
+echo "debugFlag 135" >> $TAOS_CFG
echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "dDebugFlag 135" >> $TAOS_CFG
@@ -124,7 +125,6 @@ echo "mqttDebugFlag 131" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
-echo "cDebugFlag 135" >> $TAOS_CFG
echo "udebugFlag 135" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "wdebugFlag 135" >> $TAOS_CFG
diff --git a/tests/script/sh/exec-random-fail.sh b/tests/script/sh/exec-random-fail.sh
new file mode 100755
index 0000000000..7ba301617c
--- /dev/null
+++ b/tests/script/sh/exec-random-fail.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+# if [ $# != 4 || $# != 5 ]; then
+  # echo "argument list need input : "
+  # echo " -n nodeName"
+  # echo " -s start/stop"
+  # echo " -c clear"
+  # exit 1
+# fi
+
+NODE_NAME=
+EXEC_OPTON=
+CLEAR_OPTION="false"
+while getopts "n:s:u:x:ct" arg
+do
+  case $arg in
+    n)
+      NODE_NAME=$OPTARG
+      ;;
+    s)
+      EXEC_OPTON=$OPTARG
+      ;;
+    c)
+      CLEAR_OPTION="clear"
+      ;;
+    t)
+      SHELL_OPTION="true"
+      ;;
+    u)
+      USERS=$OPTARG
+      ;;
+    x)
+      SIGNAL=$OPTARG
+      ;;
+    ?)
+      echo "unknown argument"
+      ;;
+  esac
+done
+
+SCRIPT_DIR=`dirname $0`
+cd $SCRIPT_DIR/../
+SCRIPT_DIR=`pwd`
+
+IN_TDINTERNAL="community"
+if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
+  cd ../../..
+else
+  cd ../../
+fi
+
+TAOS_DIR=`pwd`
+TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`
+
+if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
+  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
+else
+  BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
+fi
+
+BUILD_DIR=$TAOS_DIR/$BIN_DIR/build
+
+SIM_DIR=$TAOS_DIR/sim
+NODE_DIR=$SIM_DIR/$NODE_NAME
+EXE_DIR=$BUILD_DIR/bin
+CFG_DIR=$NODE_DIR/cfg
+LOG_DIR=$NODE_DIR/log
+DATA_DIR=$NODE_DIR/data
+MGMT_DIR=$NODE_DIR/data/mgmt
+TSDB_DIR=$NODE_DIR/data/tsdb
+
+TAOS_CFG=$NODE_DIR/cfg/taos.cfg
+
+echo ------------ $EXEC_OPTON $NODE_NAME
+
+TAOS_FLAG=$SIM_DIR/tsim/flag
+if [ -f "$TAOS_FLAG" ]; then
+  EXE_DIR=/usr/local/bin/taos
+fi
+
+if [ "$CLEAR_OPTION" = "clear" ]; then
+  echo rm -rf $MGMT_DIR $TSDB_DIR
+  rm -rf $TSDB_DIR
+  rm -rf $MGMT_DIR
+fi
+
+if [ "$EXEC_OPTON" = "start" ]; then
+  echo "ExecuteCmd:" $EXE_DIR/taosd -c $CFG_DIR
+
+  if [ "$SHELL_OPTION" = "true" ]; then
+    nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
+  else
+    nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 5 > /dev/null 2>&1 &
+  fi
+
+else
+  #relative path
+  RCFG_DIR=sim/$NODE_NAME/cfg
+  PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
+  while [ -n "$PID" ]
+  do
+    if [ "$SIGNAL" = "SIGINT" ]; then
+      echo try to kill by signal SIGINT
+      kill -SIGINT $PID
+    else
+      echo try to kill by signal SIGKILL
+      kill -9 $PID
+    fi
+    sleep 1
+    PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
+  done
+fi
+
diff --git a/tests/script/tmp/mnodes.sim b/tests/script/tmp/mnodes.sim
index 67f3648f64..afc068b3f1 100644
--- a/tests/script/tmp/mnodes.sim
+++ b/tests/script/tmp/mnodes.sim
@@ -1,7 +1,120 @@
system sh/stop_dnodes.sh
+
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
+
+system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 20
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 20
+system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 20
+
+system sh/cfg.sh -n dnode1 -c http -v 1
+system sh/cfg.sh -n dnode2 -c http -v 1
+system sh/cfg.sh -n dnode3 -c http -v 1
+
+system sh/cfg.sh -n dnode1 -c mDebugFlag -v 143
+system sh/cfg.sh -n dnode2 -c mDebugFlag -v 143
+system sh/cfg.sh -n dnode3 -c mDebugFlag -v 143
+
+system sh/cfg.sh -n dnode1 -c sdbDebugFlag -v 143
+system sh/cfg.sh -n dnode2 -c sdbDebugFlag -v 143
+system sh/cfg.sh -n dnode3 -c sdbDebugFlag -v 143
+
+system sh/cfg.sh -n dnode1 -c sdebugFlag -v 143
+system sh/cfg.sh -n dnode2 -c sdebugFlag -v 143
+system sh/cfg.sh -n dnode3 -c sdebugFlag -v 143
+
+system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
+system sh/cfg.sh -n dnode2 -c rpcDebugFlag -v 135
+system sh/cfg.sh -n dnode3 -c rpcDebugFlag -v 135
+
+system sh/cfg.sh -n dnode1 -c tsdbDebugFlag -v 131
+system sh/cfg.sh -n dnode2 -c tsdbDebugFlag -v 131
+system sh/cfg.sh -n dnode3 -c tsdbDebugFlag -v 131
+
+system sh/cfg.sh -n dnode1 -c mqttDebugFlag -v 131
+system sh/cfg.sh -n dnode2 -c mqttDebugFlag -v 131
+system sh/cfg.sh -n dnode3 -c mqttDebugFlag -v 131
+
+system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131
+system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131 
+system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c cDebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c cDebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c cDebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c udebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c udebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c udebugFlag -v 131 + +system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131 +system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131 +system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131 + +print ============== deploy + +system sh/exec.sh -n dnode1 -s start +sleep 2001 +sql connect + +sql create dnode $hostname2 +sql create dnode $hostname3 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start + +print =============== step1 +$x = 0 +show1: + $x = $x + 1 + sleep 2000 + if $x == 5 then + return -1 + endi +sql show mnodes -x show1 +$mnode1Role = $data2_1 +print mnode1Role $mnode1Role +$mnode2Role = $data2_2 +print mnode2Role $mnode2Role +$mnode3Role = $data2_3 +print mnode3Role $mnode3Role + +if $mnode1Role != master then + goto show1 +endi +if $mnode2Role != slave then + goto show1 +endi +if $mnode3Role != slave then + goto show1 +endi + +$x = 1 +show2: + +print =============== step $x +sql show mnodes +print $data0_1 $data2_1 +print $data0_2 $data2_2 +print $data0_3 $data2_3 + + +$x = $x + 1 +sleep 2000 +if $x == 1000 then + return -1 +endi + +goto show2 diff --git a/tests/script/unique/cluster/client1_0.sim b/tests/script/unique/cluster/client1_0.sim new file mode 100644 index 0000000000..184f0263e3 --- /dev/null +++ b/tests/script/unique/cluster/client1_0.sim @@ -0,0 +1,75 @@ +#system sh/stop_dnodes.sh +#system sh/deploy.sh -n dnode1 -i 1 +#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 10000 +#system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +#system sh/exec.sh -n dnode1 -s start +#sql connect +#$db = db1 +#sql create database $db +#sql use $db +#$stb = stb1 +#sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) + + + + + +$tblStart = 0 +$tblEnd = 1000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' +# print create table if not exists $tb using $stb tags ( $i , $tagBinary ) + sql create table if not exists $tb using $stb tags ( $i , $tagBinary ) + $i = $i + 1 +endw + +print ====================== client1_0 create table end, start insert data ............ +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + print ====================== client1_0 insert data complete once ............ + endi +endw +print ====================== client1_0 success and auto end ===================== \ No newline at end of file diff --git a/tests/script/unique/cluster/client1_1.sim b/tests/script/unique/cluster/client1_1.sim new file mode 100644 index 0000000000..dd1e68ab4f --- /dev/null +++ b/tests/script/unique/cluster/client1_1.sim @@ -0,0 +1,52 @@ +$tblStart = 10000 +$tblEnd = 20000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client1_2.sim b/tests/script/unique/cluster/client1_2.sim new file mode 100644 index 0000000000..4f87810e42 --- /dev/null +++ b/tests/script/unique/cluster/client1_2.sim @@ -0,0 +1,52 @@ +$tblStart = 20000 +$tblEnd = 30000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client1_3.sim b/tests/script/unique/cluster/client1_3.sim new file mode 100644 index 0000000000..04df0dfffc --- /dev/null +++ b/tests/script/unique/cluster/client1_3.sim @@ -0,0 +1,52 @@ +$tblStart = 30000 +$tblEnd = 40000 +$tsStart = 1325347200000 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_0.sim b/tests/script/unique/cluster/client2_0.sim new file mode 100644 index 0000000000..75e4dccf7d --- /dev/null +++ b/tests/script/unique/cluster/client2_0.sim @@ -0,0 +1,52 @@ +$tblStart = 0 +$tblEnd = 10000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.001 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_1.sim b/tests/script/unique/cluster/client2_1.sim new file mode 100644 index 0000000000..4c0d755c04 --- /dev/null +++ b/tests/script/unique/cluster/client2_1.sim @@ -0,0 +1,52 @@ +$tblStart = 10000 +$tblEnd = 20000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_2.sim b/tests/script/unique/cluster/client2_2.sim new file mode 100644 index 0000000000..2f08facf38 --- /dev/null +++ b/tests/script/unique/cluster/client2_2.sim @@ -0,0 +1,52 @@ +$tblStart = 20000 +$tblEnd = 30000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . 
$i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/client2_3.sim b/tests/script/unique/cluster/client2_3.sim new file mode 100644 index 0000000000..b83e5b6eaf --- /dev/null +++ b/tests/script/unique/cluster/client2_3.sim @@ -0,0 +1,52 @@ +$tblStart = 30000 +$tblEnd = 40000 +$tsStart = 1325347200001 # 2012-01-01 00:00:00.000 +############################################################### + +sql connect + +$db = db1 +$stb = stb1 + +sql use $db + + +######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) +$tagPrex = ' . tag + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $tagBinary = $tagPrex . $i + $tagBinary = $tagBinary . ' + sql create table if not exists $tb using $stb tags ($i, $tagBinary) + $i = $i + 1 +endw + +$rowsPerLoop = 100 +$ts = $tsStart + +$i = $tblStart +while $i < $tblEnd + $tb = tb . $i + $x = 0 + while $x < $rowsPerLoop + sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 2a , $x ) ( $ts + 4a , $x ) ( $ts + 6a , $x ) ( $ts + 8a , $x ) ( $ts + 10a , $x ) ( $ts + 12a , $x ) ( $ts + 14a , $x ) ( $ts + 16a , $x ) ( $ts + 18a , $x ) ( $ts + 20a , $x ) ( $ts + 22a , $x ) ( $ts + 24a , $x ) ( $ts + 26a , $x ) ( $ts + 28a , $x ) ( $ts + 30a , $x ) ( $ts + 32a , $x ) ( $ts + 34a , $x ) ( $ts + 36a , $x ) ( $ts + 38a , $x ) + $x = $x + 20 + $ts = $ts + 40a + endw + + $totalRows = $totalRows + $x + $i = $i + 1 + + if $i == $tblEnd then + $i = $tblStart + + sql select count(*) from $stb -x continue_loop + print data00 $data00 totalRows $totalRows + if $data00 < $totalRows then + print ********************** select error ********************** + endi + continue_loop: + endi +endw diff --git a/tests/script/unique/cluster/cluster_main.sim b/tests/script/unique/cluster/cluster_main.sim new file mode 100644 index 0000000000..236f1aa59a --- /dev/null +++ b/tests/script/unique/cluster/cluster_main.sim @@ -0,0 +1,285 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/deploy.sh -n dnode5 -i 5 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode2 -c walLevel -v 1 +system sh/cfg.sh -n dnode3 -c walLevel -v 1 +system sh/cfg.sh -n dnode4 -c walLevel -v 1 +system sh/cfg.sh -n dnode5 -c walLevel -v 1 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c 
balanceInterval -v 10 +system sh/cfg.sh -n dnode5 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 256 + +system sh/cfg.sh -n dnode1 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode2 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode3 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode4 -c alternativeRole -v 0 +system sh/cfg.sh -n dnode5 -c alternativeRole -v 0 + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 5000 +system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 5000 + +system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator +system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator + +print ============== step0: start tarbitrator +system sh/exec_tarbitrator.sh -s start + +print ============== step1: start dnode1/dnode2/dnode3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sleep 3000 +sql connect +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +print ============== step2: create db1 with replica 3 +$db = db1 +print create database $db replica 3 +#sql create database $db replica 3 maxTables $totalTableNum +sql create database $db replica 3 +sql use $db + +print ============== step3: create stable stb1 +$stb = stb1 +sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8)) + +print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5 +run_back unique/cluster/client1_0.sim +#run_back unique/cluster/client1_1.sim +#run_back unique/cluster/client1_2.sim +#run_back unique/cluster/client1_3.sim +#run_back unique/cluster/client2_0.sim +#run_back unique/cluster/client2_1.sim +#run_back unique/cluster/client2_2.sim +#run_back unique/cluster/client2_3.sim +#run_back unique/cluster/client3.sim +#run_back unique/cluster/client4.sim + +sleep 20000 + +wait_subsim_insert_complete_create_tables: +sql select count(tbname) from $stb +print select count(tbname) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_complete_create_tables +endi + +wait_subsim_insert_data: +print select count(*) from $stb +sql select count(*) from $stb +print data00 $data00 +if $data00 < 1000 then + sleep 3000 + goto wait_subsim_insert_data +endi + +print wait for a while to let clients start insert data +sleep 5000 + +$loop_cnt = 0 +loop_cluster_do: +print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** **** +print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start +sql create dnode $hostname4 +sql create dnode $hostname5 + +sleep 5000 + + +print ============== step6: stop and drop dnode1, then remove data dir of dnode1 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 5000 +sql drop dnode $hostname1 +sleep 5000 + +system rm -rf ../../../sim/dnode1/data +sleep 20000 + +sql show mnodes +print show mnodes +print rows: $rows +print 
$data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 +return -1 + +print ============== step7: stop dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep 5000 + +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step8: restart dnode2, then wait sync end +system sh/exec.sh -n dnode2 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step9: stop dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step10: restart dnode3, then wait sync end +system sh/exec.sh -n dnode3 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5 +print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6 +print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7 +print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8 +print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9 + +print ============== step11: stop dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s stop -x SIGINT +sleep 20000 + +print ============== step12: restart dnode4, then wait sync end +system sh/exec.sh -n dnode4 -s start +sleep 20000 +sql show mnodes +print show mnodes +print rows: $rows +print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 +print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 +print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 +print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 +print $data0_5 
$data1_5 $data2_5 $data3_5 $data4_5
+print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
+print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
+print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
+print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
+
+print ============== step13: alter replica 2
+sql alter database $db replica 2
+sql show databases
+print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+
+if $data04 != 2 then
+  print replica was not modified to 2, error!!!!!!
+  return -1
+endi
+
+print ============== step14: stop and drop dnode4/dnode5, then remove data dir of dnode4/dnode5
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+system sh/exec.sh -n dnode5 -s stop -x SIGINT
+sleep 20000
+sql drop dnode $hostname4
+sql drop dnode $hostname5
+system rm -rf ../../../sim/dnode4/data
+system rm -rf ../../../sim/dnode5/data
+
+print ============== step15: alter replica 1
+sql alter database $db replica 1
+sql show databases
+print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+if $data04 != 1 then
+  print replica was not modified to 1, error!!!!!!
+  return -1
+endi
+
+print ============== step16: alter replica 2
+sql alter database $db replica 2
+sql show databases
+print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+if $data04 != 2 then
+  print replica was not modified to 2, error!!!!!!
+  return -1
+endi
+
+print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready
+system sh/cfg.sh -n dnode1 -c first -v $hostname2
+system sh/cfg.sh -n dnode1 -c second -v $hostname3
+
+system sh/exec.sh -n dnode1 -s start
+sql create dnode $hostname1
+sleep 20000
+
+print ============== step18: alter replica 3
+sql alter database $db replica 3
+sql show databases
+print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+if $data04 != 3 then
+  print replica was not modified to 3, error!!!!!!
+  return -1
+endi
+
+print **** **** **** (loop_cnt: $loop_cnt ) end, continue...... 
**** **** **** **** +$loop_cnt = $loop_cnt + 1 +goto loop_cluster_do diff --git a/tests/test/c/createTablePerformance.c b/tests/test/c/createTablePerformance.c index 4ab6f98423..3edffd2a5e 100644 --- a/tests/test/c/createTablePerformance.c +++ b/tests/test/c/createTablePerformance.c @@ -31,6 +31,7 @@ char stableName[64] = "st"; int32_t numOfThreads = 30; int32_t numOfTables = 100000; int32_t maxTables = 5000; +int32_t replica = 1; int32_t numOfColumns = 2; typedef struct { @@ -96,7 +97,7 @@ void createDbAndSTable() { exit(1); } - sprintf(qstr, "create database if not exists %s maxtables %d", dbName, maxTables); + sprintf(qstr, "create database if not exists %s maxtables %d replica %d", dbName, maxTables, replica); TAOS_RES *pSql = taos_query(con, qstr); int32_t code = taos_errno(pSql); if (code != 0) { @@ -189,6 +190,8 @@ void printHelp() { printf("%s%s%s%d\n", indent, indent, "numOfThreads, default is ", numOfThreads); printf("%s%s\n", indent, "-n"); printf("%s%s%s%d\n", indent, indent, "numOfTables, default is ", numOfTables); + printf("%s%s\n", indent, "-r"); + printf("%s%s%s%d\n", indent, indent, "replica, default is ", replica); printf("%s%s\n", indent, "-columns"); printf("%s%s%s%d\n", indent, indent, "numOfColumns, default is ", numOfColumns); printf("%s%s\n", indent, "-tables"); @@ -212,6 +215,8 @@ void shellParseArgument(int argc, char *argv[]) { numOfThreads = atoi(argv[++i]); } else if (strcmp(argv[i], "-n") == 0) { numOfTables = atoi(argv[++i]); + } else if (strcmp(argv[i], "-r") == 0) { + replica = atoi(argv[++i]); } else if (strcmp(argv[i], "-tables") == 0) { maxTables = atoi(argv[++i]); } else if (strcmp(argv[i], "-columns") == 0) { @@ -226,6 +231,7 @@ void shellParseArgument(int argc, char *argv[]) { pPrint("%s numOfTables:%d %s", GREEN, numOfTables, NC); pPrint("%s numOfThreads:%d %s", GREEN, numOfThreads, NC); pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC); + pPrint("%s replica:%d %s", GREEN, replica, NC); pPrint("%s dbPara maxTables:%d %s", GREEN, maxTables, NC); pPrint("%s start create table performace test %s", GREEN, NC); diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h index ae8848e1ac..ecd481f869 100644 --- a/tests/tsim/inc/sim.h +++ b/tests/tsim/inc/sim.h @@ -51,24 +51,12 @@ #define FAILED_POSTFIX "" #endif -#define simError(...) \ - if (simDebugFlag & DEBUG_ERROR) { \ - taosPrintLog("ERROR SIM ", 255, __VA_ARGS__); \ - } -#define simWarn(...) \ - if (simDebugFlag & DEBUG_WARN) { \ - taosPrintLog("WARN SIM ", simDebugFlag, __VA_ARGS__); \ - } -#define simTrace(...) \ - if (simDebugFlag & DEBUG_TRACE) { \ - taosPrintLog("SIM ", simDebugFlag, __VA_ARGS__); \ - } -#define simDump(x, y) \ - if (simDebugFlag & DEBUG_DUMP) { \ - taosDumpData(x, y); \ - } -#define simPrint(...) \ - { taosPrintLog("SIM ", 255, __VA_ARGS__); } +#define simFatal(...) { if (simDebugFlag & DEBUG_FATAL) { taosPrintLog("SIM FATAL ", 255, __VA_ARGS__); }} +#define simError(...) { if (simDebugFlag & DEBUG_ERROR) { taosPrintLog("SIM ERROR ", 255, __VA_ARGS__); }} +#define simWarn(...) { if (simDebugFlag & DEBUG_WARN) { taosPrintLog("SIM WARN ", 255, __VA_ARGS__); }} +#define simInfo(...) { if (simDebugFlag & DEBUG_INFO) { taosPrintLog("SIM INFO ", 255, __VA_ARGS__); }} +#define simDebug(...) { if (simDebugFlag & DEBUG_DEBUG) { taosPrintLog("SIM DEBUG ", simDebugFlag, __VA_ARGS__); }} +#define simTrace(...) 
{ if (simDebugFlag & DEBUG_TRACE) { taosPrintLog("SIM TRACE ", simDebugFlag, __VA_ARGS__); }} enum { SIM_SCRIPT_TYPE_MAIN, SIM_SCRIPT_TYPE_BACKGROUND }; diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 048ee04866..b077547709 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -75,7 +75,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { for (int i = 0; i < MAX_QUERY_ROW_NUM; ++i) { if (strncmp(keyName, script->data[i][0], keyLen) == 0) { - simTrace("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); + simDebug("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); return script->data[i][col]; } } @@ -90,7 +90,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { return "null"; } - simTrace("script:%s, data[%d][%d]=%s", script->fileName, row, col, script->data[row][col]); + simDebug("script:%s, data[%d][%d]=%s", script->fileName, row, col, script->data[row][col]); return script->data[row][col]; } } @@ -102,7 +102,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } if (strncmp(varName, var->varName, varLen) == 0) { // if (strlen(var->varValue) != 0) - // simTrace("script:%s, var:%s, value:%s", script->fileName, + // simDebug("script:%s, var:%s, value:%s", script->fileName, // var->varName, var->varValue); return var->varValue; } @@ -240,7 +240,7 @@ bool simExecuteRunCmd(SScript *script, char *option) { return false; } - simPrint("script:%s, start to execute", newScript->fileName); + simInfo("script:%s, start to execute", newScript->fileName); newScript->type = SIM_SCRIPT_TYPE_MAIN; simScriptPos++; @@ -262,7 +262,7 @@ bool simExecuteRunBackCmd(SScript *script, char *option) { sprintf(script->error, "lineNum:%d. parse file:%s error", script->lines[script->linePos].lineNum, fileName); return false; } - simPrint("script:%s, start to execute in background", newScript->fileName); + simInfo("script:%s, start to execute in background", newScript->fileName); newScript->type = SIM_SCRIPT_TYPE_BACKGROUND; script->bgScripts[script->bgScriptLen++] = newScript; @@ -336,7 +336,7 @@ bool simExecutePrintCmd(SScript *script, char *rest) { simVisuallizeOption(script, rest, buf); rest = buf; - simPrint("script:%s, %s", script->fileName, rest); + simInfo("script:%s, %s", script->fileName, rest); script->linePos++; return true; } @@ -351,9 +351,9 @@ bool simExecuteSleepCmd(SScript *script, char *option) { delta = atoi(option); if (delta <= 0) delta = 5; - simPrint("script:%s, sleep %dms begin", script->fileName, delta); + simInfo("script:%s, sleep %dms begin", script->fileName, delta); taosMsleep(delta); - simPrint("script:%s, sleep %dms finished", script->fileName, delta); + simInfo("script:%s, sleep %dms finished", script->fileName, delta); script->linePos++; return true; @@ -372,7 +372,7 @@ bool simExecuteReturnCmd(SScript *script, char *option) { sprintf(script->error, "lineNum:%d. 
error return %s", script->lines[script->linePos].lineNum, option); return false; } else { - simPrint("script:%s, return cmd execute with:%d", script->fileName, ret); + simInfo("script:%s, return cmd execute with:%d", script->fileName, ret); script->linePos = script->numOfLines; } @@ -418,7 +418,7 @@ void simCloseRestFulConnect(SScript *script) { void simCloseNativeConnect(SScript *script) { if (script->taos == NULL) return; - simTrace("script:%s, taos:%p closed", script->fileName, script->taos); + simDebug("script:%s, taos:%p closed", script->fileName, script->taos); taos_close(script->taos); taosMsleep(1200); @@ -468,7 +468,7 @@ int simParseHttpCommandResult(SScript *script, char *command) { cJSON_Delete(root); return retcode; } else { - simTrace("script:%s, json:status:%s not equal to succ, but code is %d, response:%s", script->fileName, + simDebug("script:%s, json:status:%s not equal to succ, but code is %d, response:%s", script->fileName, status->valuestring, retcode, command); cJSON_Delete(root); return 0; @@ -568,10 +568,10 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) { for (int attempt = 0; attempt < 10; ++attempt) { success = simExecuteRestFulCommand(script, command) == 0; if (!success) { - simTrace("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); taosMsleep(1000); } else { - simTrace("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); + simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); break; } } @@ -581,7 +581,7 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) { return false; } - simTrace("script:%s, connect taosd successed, auth:%p", script->fileName, script->auth); + simDebug("script:%s, connect taosd successed, auth:%p", script->fileName, script->auth); return true; } @@ -592,10 +592,10 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { for (int attempt = 0; attempt < 10; ++attempt) { taos = taos_connect(NULL, user, pass, NULL, tsDnodeShellPort); if (taos == NULL) { - simTrace("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); taosMsleep(1000); } else { - simTrace("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); + simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); break; } } @@ -606,7 +606,7 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { } script->taos = taos; - simTrace("script:%s, connect taosd successed, taos:%p", script->fileName, taos); + simDebug("script:%s, connect taosd successed, taos:%p", script->fileName, taos); return true; } @@ -643,11 +643,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { ret = taos_errno(pSql); if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simTrace("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simTrace("script:%s, 
taos:%p, %s failed, ret:%d:%s, error:%s", + simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", script->fileName, script->taos, rest, ret, tstrerror(ret), taos_errstr(pSql)); if (line->errorJump == SQL_JUMP_TRUE) { @@ -672,7 +672,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { int num_fields = taos_field_count(pSql); if (num_fields != 0) { if (pSql == NULL) { - simTrace("script:%s, taos:%p, %s failed, result is null", script->fileName, script->taos, rest); + simDebug("script:%s, taos:%p, %s failed, result is null", script->fileName, script->taos, rest); if (line->errorJump == SQL_JUMP_TRUE) { script->linePos = line->jump; return true; @@ -794,11 +794,11 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) { ret = simExecuteRestFulCommand(script, command); if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simTrace("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simTrace("script:%s, taos:%p, %s failed, ret:%d", + simDebug("script:%s, taos:%p, %s failed, ret:%d", script->fileName, script->taos, rest, ret); if (line->errorJump == SQL_JUMP_TRUE) { @@ -827,7 +827,7 @@ bool simExecuteSqlImpCmd(SScript *script, char *rest, bool isSlow) { simVisuallizeOption(script, rest, buf); rest = buf; - simTrace("script:%s, exec:%s", script->fileName, rest); + simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { @@ -883,7 +883,7 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { simVisuallizeOption(script, rest, buf); rest = buf; - simTrace("script:%s, exec:%s", script->fileName, rest); + simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { @@ -929,7 +929,7 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { } if (ret != TSDB_CODE_SUCCESS) { - simTrace("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", + simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret)); script->linePos++; return true; diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c index f016e36d41..5b2fc87307 100644 --- a/tests/tsim/src/simMain.c +++ b/tests/tsim/src/simMain.c @@ -49,7 +49,7 @@ int main(int argc, char *argv[]) { exit(1); } - simPrint("simulator is running ..."); + simInfo("simulator is running ..."); signal(SIGINT, simHandleSignal); SScript *script = simParseScript(scriptFile); diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c index 3acfebb9bd..b50c853ea8 100644 --- a/tests/tsim/src/simSystem.c +++ b/tests/tsim/src/simSystem.c @@ -71,7 +71,7 @@ char *simParseHostName(char *varName) { } sprintf(hostName, "'%s:%d'", simHostName, port); - //simPrint("hostName:%s", hostName); + //simInfo("hostName:%s", hostName); return hostName; } @@ -102,20 +102,20 @@ void simFreeScript(SScript *script) { SScript *simProcessCallOver(SScript *script) { if (script->type == SIM_SCRIPT_TYPE_MAIN) { if (script->killed) { - simPrint("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX + simInfo("script:" FAILED_PREFIX "%s" 
FAILED_POSTFIX ", " FAILED_PREFIX "failed" FAILED_POSTFIX ", error:%s", script->fileName, script->error); exit(-1); } else { - simPrint("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX + simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX "success" SUCCESS_POSTFIX, script->fileName); simCloseTaosdConnect(script); simScriptSucced++; simScriptPos--; if (simScriptPos == -1) { - simPrint("----------------------------------------------------------------------"); - simPrint("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced); + simInfo("----------------------------------------------------------------------"); + simInfo("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced); exit(0); } @@ -123,7 +123,7 @@ SScript *simProcessCallOver(SScript *script) { return simScriptList[simScriptPos]; } } else { - simPrint("script:%s, is stopped by main script", script->fileName); + simInfo("script:%s, is stopped by main script", script->fileName); simFreeScript(script); return NULL; } @@ -143,7 +143,7 @@ void *simExecuteScript(void *inputScript) { } else { SCmdLine *line = &script->lines[script->linePos]; char *option = script->optionBuffer + line->optionOffset; - simTrace("script:%s, line:%d with option \"%s\"", script->fileName, line->lineNum, option); + simDebug("script:%s, line:%d with option \"%s\"", script->fileName, line->lineNum, option); SCommand *cmd = &simCmdList[line->cmdno]; int ret = (*(cmd->executeCmd))(script, option);